-rw-r--r--  .mailmap | 1
-rw-r--r--  CREDITS | 8
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-optee-devices | 9
-rw-r--r--  Documentation/ABI/testing/sysfs-class-led | 9
-rw-r--r--  Documentation/accel/qaic/qaic.rst | 9
-rw-r--r--  Documentation/core-api/pin_user_pages.rst | 2
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml | 6
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/lontium,lt8912b.yaml | 21
-rw-r--r--  Documentation/devicetree/bindings/display/fsl,lcdif.yaml | 20
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/display/panel/panel-simple.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml | 14
-rw-r--r--  Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/qcom,mpm.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/perf/riscv,pmu.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/nxp,s32g2-siul2-pinctrl.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/imx-pwm.yaml | 10
-rw-r--r--  Documentation/devicetree/bindings/soc/rockchip/grf.yaml | 1
-rw-r--r--  Documentation/gpu/drm-kms.rst | 8
-rw-r--r--  Documentation/gpu/drm-mm.rst | 4
-rw-r--r--  Documentation/gpu/drm-vm-bind-locking.rst | 582
-rw-r--r--  Documentation/gpu/imagination/index.rst | 2
-rw-r--r--  Documentation/gpu/imagination/uapi.rst | 5
-rw-r--r--  Documentation/gpu/implementation_guidelines.rst | 1
-rw-r--r--  Documentation/gpu/rfc/xe.rst | 5
-rw-r--r--  Documentation/gpu/todo.rst | 30
-rw-r--r--  Documentation/networking/tcp_ao.rst | 2
-rw-r--r--  Documentation/trace/coresight/coresight.rst | 2
-rw-r--r--  MAINTAINERS | 202
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/boot/dts/broadcom/bcm2711-rpi-400.dts | 4
-rw-r--r--  arch/arm/boot/dts/nxp/imx/imx6q-skov-reve-mi1010ait-1cp1.dts | 4
-rw-r--r--  arch/arm/boot/dts/nxp/imx/imx6ul-pico.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/nxp/imx/imx7s.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/nxp/mxs/imx28-xea.dts | 1
-rw-r--r--  arch/arm/boot/dts/rockchip/rk3128.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/rockchip/rk322x.dtsi | 6
-rw-r--r--  arch/arm/include/asm/kexec.h | 4
-rw-r--r--  arch/arm/kernel/Makefile | 2
-rw-r--r--  arch/arm/mach-imx/mmdc.c | 7
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8-apalis-v1.1.dtsi | 5
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi | 8
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mp.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mq.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi | 11
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8ulp.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx93.dtsi | 10
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts | 2
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts | 2
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts | 12
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt7986a.dtsi | 24
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8173-evb.dts | 4
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8183-evb.dts | 4
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi | 8
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi | 96
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8183.dtsi | 242
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8186.dtsi | 44
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8195.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3328.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi | 3
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-dumo.dts | 4
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk356x.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3588s-orangepi-5.dts | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3588s-pinctrl.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3588s.dtsi | 1
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 4
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v4.c | 4
-rw-r--r--  arch/loongarch/Makefile | 2
-rw-r--r--  arch/loongarch/include/asm/elf.h | 2
-rw-r--r--  arch/loongarch/include/asm/loongarch.h | 5
-rw-r--r--  arch/loongarch/kernel/stacktrace.c | 2
-rw-r--r--  arch/loongarch/kernel/unwind.c | 1
-rw-r--r--  arch/loongarch/kernel/unwind_prologue.c | 2
-rw-r--r--  arch/loongarch/net/bpf_jit.c | 18
-rw-r--r--  arch/mips/Kconfig | 2
-rw-r--r--  arch/mips/include/asm/mach-loongson64/boot_param.h | 9
-rw-r--r--  arch/mips/kernel/process.c | 25
-rw-r--r--  arch/mips/kernel/smp.c | 4
-rw-r--r--  arch/mips/loongson64/env.c | 10
-rw-r--r--  arch/mips/loongson64/init.c | 47
-rw-r--r--  arch/parisc/include/asm/bug.h | 2
-rw-r--r--  arch/powerpc/kernel/fpu.S | 13
-rw-r--r--  arch/powerpc/kernel/process.c | 6
-rw-r--r--  arch/powerpc/kernel/trace/ftrace_entry.S | 4
-rw-r--r--  arch/powerpc/kernel/vector.S | 2
-rw-r--r--  arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts | 7
-rw-r--r--  arch/riscv/boot/dts/microchip/mpfs-m100pfsevp.dts | 7
-rw-r--r--  arch/riscv/boot/dts/microchip/mpfs-polarberry.dts | 7
-rw-r--r--  arch/riscv/boot/dts/microchip/mpfs-sev-kit.dts | 7
-rw-r--r--  arch/riscv/boot/dts/microchip/mpfs-tysom-m.dts | 7
-rw-r--r--  arch/riscv/boot/dts/microchip/mpfs.dtsi | 1
-rw-r--r--  arch/riscv/boot/dts/sophgo/cv1800b.dtsi | 1
-rw-r--r--  arch/riscv/errata/andes/errata.c | 20
-rw-r--r--  arch/riscv/kernel/head.S | 2
-rw-r--r--  arch/riscv/kernel/module.c | 114
-rw-r--r--  arch/riscv/kernel/sys_riscv.c | 2
-rw-r--r--  arch/riscv/kernel/tests/module_test/test_uleb128.S | 8
-rw-r--r--  arch/riscv/kernel/traps_misaligned.c | 6
-rw-r--r--  arch/s390/kvm/vsie.c | 4
-rw-r--r--  arch/s390/mm/pgtable.c | 2
-rw-r--r--  arch/x86/coco/tdx/tdx.c | 1
-rw-r--r--  arch/x86/entry/common.c | 93
-rw-r--r--  arch/x86/entry/entry_64_compat.S | 77
-rw-r--r--  arch/x86/include/asm/ia32.h | 7
-rw-r--r--  arch/x86/include/asm/idtentry.h | 4
-rw-r--r--  arch/x86/include/asm/proto.h | 4
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 3
-rw-r--r--  arch/x86/kernel/idt.c | 2
-rw-r--r--  arch/x86/kernel/sev.c | 11
-rw-r--r--  arch/x86/kvm/debugfs.c | 1
-rw-r--r--  arch/x86/kvm/svm/svm.c | 8
-rw-r--r--  arch/x86/kvm/x86.c | 9
-rw-r--r--  arch/x86/mm/mem_encrypt_amd.c | 11
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 46
-rw-r--r--  arch/x86/xen/enlighten.c | 6
-rw-r--r--  arch/x86/xen/enlighten_pv.c | 2
-rw-r--r--  arch/x86/xen/xen-asm.S | 2
-rw-r--r--  arch/x86/xen/xen-ops.h | 2
-rw-r--r--  block/blk-core.c | 14
-rw-r--r--  block/blk-mq.c | 14
-rw-r--r--  block/blk-sysfs.c | 2
-rw-r--r--  drivers/accel/drm_accel.c | 1
-rw-r--r--  drivers/accel/qaic/mhi_controller.c | 2
-rw-r--r--  drivers/accel/qaic/qaic.h | 15
-rw-r--r--  drivers/accel/qaic/qaic_control.c | 5
-rw-r--r--  drivers/accel/qaic/qaic_data.c | 16
-rw-r--r--  drivers/accel/qaic/qaic_drv.c | 50
-rw-r--r--  drivers/acpi/acpi_video.c | 14
-rw-r--r--  drivers/acpi/scan.c | 7
-rw-r--r--  drivers/acpi/utils.c | 2
-rw-r--r--  drivers/ata/libata-scsi.c | 9
-rw-r--r--  drivers/auxdisplay/Kconfig | 10
-rw-r--r--  drivers/auxdisplay/cfag12864bfb.c | 10
-rw-r--r--  drivers/auxdisplay/ht16k33.c | 10
-rw-r--r--  drivers/base/cpu.c | 6
-rw-r--r--  drivers/base/devcoredump.c | 3
-rw-r--r--  drivers/base/memory.c | 18
-rw-r--r--  drivers/base/regmap/regcache.c | 3
-rw-r--r--  drivers/char/agp/Makefile | 6
-rw-r--r--  drivers/char/agp/agp.h | 9
-rw-r--r--  drivers/char/agp/backend.c | 11
-rw-r--r--  drivers/char/agp/compat_ioctl.c | 291
-rw-r--r--  drivers/char/agp/compat_ioctl.h | 106
-rw-r--r--  drivers/char/agp/frontend.c | 1068
-rw-r--r--  drivers/cpufreq/amd-pstate.c | 71
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-nvmem.c | 73
-rw-r--r--  drivers/dma-buf/dma-fence.c | 3
-rw-r--r--  drivers/dma-buf/dma-resv.c | 2
-rw-r--r--  drivers/dma-buf/sw_sync.c | 82
-rw-r--r--  drivers/dma-buf/sync_debug.h | 2
-rw-r--r--  drivers/dma-buf/sync_file.c | 19
-rw-r--r--  drivers/firewire/core-device.c | 11
-rw-r--r--  drivers/firewire/sbp2.c | 6
-rw-r--r--  drivers/firmware/arm_ffa/driver.c | 70
-rw-r--r--  drivers/firmware/arm_scmi/perf.c | 18
-rw-r--r--  drivers/firmware/efi/unaccepted_memory.c | 2
-rw-r--r--  drivers/gpio/gpiolib-sysfs.c | 15
-rw-r--r--  drivers/gpu/drm/Kconfig | 24
-rw-r--r--  drivers/gpu/drm/Makefile | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c | 24
-rw-r--r--  drivers/gpu/drm/aspeed/aspeed_gfx_drv.c | 10
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 263
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 101
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 244
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 26
-rw-r--r--  drivers/gpu/drm/ast/ast_post.c | 73
-rw-r--r--  drivers/gpu/drm/ast/ast_reg.h | 12
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 18
-rw-r--r--  drivers/gpu/drm/bridge/Makefile | 2
-rw-r--r--  drivers/gpu/drm/bridge/analogix/anx7625.c | 54
-rw-r--r--  drivers/gpu/drm/bridge/analogix/anx7625.h | 4
-rw-r--r--  drivers/gpu/drm/bridge/aux-bridge.c | 140
-rw-r--r--  drivers/gpu/drm/bridge/aux-hpd-bridge.c | 163
-rw-r--r--  drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt8912b.c | 58
-rw-r--r--  drivers/gpu/drm/bridge/nxp-ptn3460.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/panel.c | 17
-rw-r--r--  drivers/gpu/drm/bridge/tc358767.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi86.c | 6
-rw-r--r--  drivers/gpu/drm/drm_agpsupport.c | 451
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 9
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 78
-rw-r--r--  drivers/gpu/drm/drm_atomic_state_helper.c | 14
-rw-r--r--  drivers/gpu/drm/drm_atomic_uapi.c | 97
-rw-r--r--  drivers/gpu/drm/drm_auth.c | 8
-rw-r--r--  drivers/gpu/drm/drm_bridge.c | 44
-rw-r--r--  drivers/gpu/drm/drm_bridge_connector.c | 6
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 1627
-rw-r--r--  drivers/gpu/drm/drm_context.c | 513
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 7
-rw-r--r--  drivers/gpu/drm/drm_crtc_internal.h | 2
-rw-r--r--  drivers/gpu/drm/drm_damage_helper.c | 3
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 65
-rw-r--r--  drivers/gpu/drm/drm_dma.c | 178
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 27
-rw-r--r--  drivers/gpu/drm/drm_encoder.c | 4
-rw-r--r--  drivers/gpu/drm/drm_file.c | 66
-rw-r--r--  drivers/gpu/drm/drm_framebuffer.c | 2
-rw-r--r--  drivers/gpu/drm/drm_gpuvm.c | 53
-rw-r--r--  drivers/gpu/drm/drm_hashtab.c | 203
-rw-r--r--  drivers/gpu/drm/drm_internal.h | 17
-rw-r--r--  drivers/gpu/drm/drm_ioc32.c | 613
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 95
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 204
-rw-r--r--  drivers/gpu/drm/drm_legacy.h | 290
-rw-r--r--  drivers/gpu/drm/drm_legacy_misc.c | 105
-rw-r--r--  drivers/gpu/drm/drm_lock.c | 373
-rw-r--r--  drivers/gpu/drm/drm_memory.c | 138
-rw-r--r--  drivers/gpu/drm/drm_mipi_dsi.c | 17
-rw-r--r--  drivers/gpu/drm/drm_mode_object.c | 2
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 204
-rw-r--r--  drivers/gpu/drm/drm_plane.c | 150
-rw-r--r--  drivers/gpu/drm/drm_plane_helper.c | 32
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 33
-rw-r--r--  drivers/gpu/drm/drm_scatter.c | 220
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c | 64
-rw-r--r--  drivers/gpu/drm/drm_vblank.c | 101
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 665
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos7_drm_decon.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dma.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dpi.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 16
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 15
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_mic.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_scaler.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 6
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug | 18
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 4
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_wm.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc_state_dump.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cursor.c | 42
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cx0_phy.c | 11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 36
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c | 165
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_device.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_irq.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 17
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_vbt.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dvo.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.c | 163
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb_bo.c | 97
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb_bo.h | 26
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c | 112
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev_fb.c | 115
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev_fb.h | 21
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lvds.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 96
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.h | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_snps_phy.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tv.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vblank.c | 51
-rw-r--r--  drivers/gpu/drm/i915/display/skl_universal_plane.c | 78
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c | 26
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 17
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 12
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/mock_context.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 43
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_types.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pm.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_regs.h | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_types.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_user.c | 39
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt.c | 23
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.h | 9
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_mcr.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.h | 38
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_regs.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.c | 26
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.h | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 100
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_sseu.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 38
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_cs.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_gt_pm.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_lrc.c | 65
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_reset.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_rps.c | 17
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_slpc.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 23
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/selftest_guc.c | 115
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 108
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_drm_client.c | 108
-rw-r--r--  drivers/gpu/drm/i915/i915_drm_client.h | 42
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 194
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h | 44
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 77
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 79
-rw-r--r--  drivers/gpu/drm/i915/intel_memory_region.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_memory_region.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 243
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.h | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_wakeref.c | 35
-rw-r--r--  drivers/gpu/drm/i915/intel_wakeref.h | 73
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp.c | 18
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_irq.c | 5
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_session.c | 6
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_types.h | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_syncmap.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_live_test.c | 9
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_live_test.h | 3
-rw-r--r--  drivers/gpu/drm/imagination/pvr_cccb.h | 1
-rw-r--r--  drivers/gpu/drm/imagination/pvr_device.c | 10
-rw-r--r--  drivers/gpu/drm/imagination/pvr_device.h | 25
-rw-r--r--  drivers/gpu/drm/imagination/pvr_device_info.c | 3
-rw-r--r--  drivers/gpu/drm/imagination/pvr_free_list.c | 2
-rw-r--r--  drivers/gpu/drm/imagination/pvr_fw.h | 3
-rw-r--r--  drivers/gpu/drm/imagination/pvr_fw_info.h | 8
-rw-r--r--  drivers/gpu/drm/imagination/pvr_fw_meta.c | 1
-rw-r--r--  drivers/gpu/drm/imagination/pvr_fw_trace.c | 48
-rw-r--r--  drivers/gpu/drm/imagination/pvr_hwrt.h | 1
-rw-r--r--  drivers/gpu/drm/imagination/pvr_job.c | 4
-rw-r--r--  drivers/gpu/drm/imagination/pvr_mmu.c | 3
-rw-r--r--  drivers/gpu/drm/imagination/pvr_queue.c | 2
-rw-r--r--  drivers/gpu/drm/imagination/pvr_queue.h | 4
-rw-r--r--  drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h | 4
-rw-r--r--  drivers/gpu/drm/imagination/pvr_vm.c | 131
-rw-r--r--  drivers/gpu/drm/imagination/pvr_vm.h | 3
-rw-r--r--  drivers/gpu/drm/imagination/pvr_vm_mips.c | 11
-rw-r--r--  drivers/gpu/drm/imx/ipuv3/imx-ldb.c | 9
-rw-r--r--  drivers/gpu/drm/imx/lcdc/imx-lcdc.c | 9
-rw-r--r--  drivers/gpu/drm/loongson/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/loongson/lsdc_plane.c | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 2
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_drv.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h | 51
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 31
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_exec.c | 68
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_exec.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sched.c | 207
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sched.h | 43
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_uvmm.c | 210
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_uvmm.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 88
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dispc.c | 4
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss.c | 5
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 14
-rw-r--r--  drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c | 1
-rw-r--r--  drivers/gpu/drm/panel/panel-edp.c | 83
-rw-r--r--  drivers/gpu/drm/panel/panel-himax-hx8394.c | 180
-rw-r--r--  drivers/gpu/drm/panel/panel-ilitek-ili9881c.c | 225
-rw-r--r--  drivers/gpu/drm/panel/panel-novatek-nt36523.c | 4
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 62
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_devfreq.c | 17
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_device.c | 3
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_device.h | 10
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem.c | 2
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gpu.c | 40
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gpu.h | 1
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_job.c | 26
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_job.h | 1
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_mmu.c | 32
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_mmu.h | 1
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 14
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 1
-rw-r--r--  drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c | 1
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c | 14
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c | 20
-rw-r--r--  drivers/gpu/drm/solomon/ssd130x.h | 1
-rw-r--r--  drivers/gpu/drm/tests/Makefile | 1
-rw-r--r--  drivers/gpu/drm/tests/drm_gem_shmem_test.c | 383
-rw-r--r--  drivers/gpu/drm/tidss/tidss_crtc.c | 12
-rw-r--r--  drivers/gpu/drm/tidss/tidss_dispc.c | 138
-rw-r--r--  drivers/gpu/drm/tidss/tidss_dispc.h | 3
-rw-r--r--  drivers/gpu/drm/tidss/tidss_drv.c | 16
-rw-r--r--  drivers/gpu/drm/tidss/tidss_irq.c | 54
-rw-r--r--  drivers/gpu/drm/tidss/tidss_kms.c | 2
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.c | 9
-rw-r--r--  drivers/gpu/drm/tiny/ofdrm.c | 1
-rw-r--r--  drivers/gpu/drm/tiny/simpledrm.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_device.c | 6
-rw-r--r--  drivers/gpu/drm/udl/udl_modeset.c | 19
-rw-r--r--  drivers/gpu/drm/v3d/Makefile | 3
-rw-r--r--  drivers/gpu/drm/v3d/v3d_bo.c | 51
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.c | 4
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.h | 134
-rw-r--r--  drivers/gpu/drm/v3d/v3d_gem.c | 768
-rw-r--r--  drivers/gpu/drm/v3d/v3d_sched.c | 316
-rw-r--r--  drivers/gpu/drm/v3d/v3d_submit.c | 1320
-rw-r--r--  drivers/gpu/drm/v3d/v3d_trace.h | 57
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_drv.c | 2
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_mode.c | 4
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c | 18
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 20
-rw-r--r--  drivers/gpu/drm/xlnx/zynqmp_kms.c | 1
-rw-r--r--  drivers/greybus/Kconfig | 1
-rw-r--r--  drivers/hid/hid-picolcd_fb.c | 1
-rw-r--r--  drivers/hwmon/acpi_power_meter.c | 4
-rw-r--r--  drivers/hwmon/corsair-psu.c | 18
-rw-r--r--  drivers/hwmon/ltc2991.c | 2
-rw-r--r--  drivers/hwmon/max31827.c | 1
-rw-r--r--  drivers/hwmon/nzxt-kraken2.c | 4
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm-perf.c | 4
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x-core.c | 6
-rw-r--r--  drivers/hwtracing/coresight/ultrasoc-smb.c | 58
-rw-r--r--  drivers/hwtracing/coresight/ultrasoc-smb.h | 6
-rw-r--r--  drivers/hwtracing/ptt/hisi_ptt.c | 14
-rw-r--r--  drivers/infiniband/core/umem.c | 6
-rw-r--r--  drivers/infiniband/core/verbs.c | 2
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c | 2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 13
-rw-r--r--  drivers/infiniband/hw/irdma/hw.c | 16
-rw-r--r--  drivers/infiniband/hw/irdma/main.c | 2
-rw-r--r--  drivers/infiniband/hw/irdma/main.h | 2
-rw-r--r--  drivers/infiniband/hw/irdma/verbs.c | 35
-rw-r--r--  drivers/infiniband/hw/irdma/verbs.h | 1
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-clt.c | 7
-rw-r--r--  drivers/infiniband/ulp/rtrs/rtrs-srv.c | 37
-rw-r--r--  drivers/iommu/intel/dmar.c | 18
-rw-r--r--  drivers/iommu/intel/iommu.c | 18
-rw-r--r--  drivers/iommu/intel/iommu.h | 3
-rw-r--r--  drivers/iommu/intel/svm.c | 26
-rw-r--r--  drivers/iommu/iommu.c | 79
-rw-r--r--  drivers/iommu/iommufd/device.c | 14
-rw-r--r--  drivers/iommu/iommufd/hw_pagetable.c | 8
-rw-r--r--  drivers/iommu/iommufd/ioas.c | 14
-rw-r--r--  drivers/iommu/iommufd/iommufd_private.h | 70
-rw-r--r--  drivers/iommu/iommufd/main.c | 146
-rw-r--r--  drivers/iommu/iommufd/selftest.c | 14
-rw-r--r--  drivers/iommu/iommufd/vfio_compat.c | 18
-rw-r--r--  drivers/iommu/of_iommu.c | 14
-rw-r--r--  drivers/leds/led-class.c | 14
-rw-r--r--  drivers/leds/trigger/ledtrig-netdev.c | 11
-rw-r--r--  drivers/md/bcache/btree.c | 16
-rw-r--r--  drivers/md/bcache/journal.c | 20
-rw-r--r--  drivers/md/bcache/movinggc.c | 16
-rw-r--r--  drivers/md/bcache/request.c | 74
-rw-r--r--  drivers/md/bcache/request.h | 2
-rw-r--r--  drivers/md/bcache/super.c | 40
-rw-r--r--  drivers/md/bcache/writeback.c | 16
-rw-r--r--  drivers/md/dm-flakey.c | 2
-rw-r--r--  drivers/md/dm-verity-fec.c | 3
-rw-r--r--  drivers/md/dm-verity-target.c | 7
-rw-r--r--  drivers/md/dm-verity.h | 6
-rw-r--r--  drivers/md/md.c | 144
-rw-r--r--  drivers/md/raid5.c | 4
-rw-r--r--  drivers/media/pci/ivtv/Kconfig | 4
-rw-r--r--  drivers/media/pci/ivtv/ivtvfb.c | 6
-rw-r--r--  drivers/media/pci/mgb4/Kconfig | 1
-rw-r--r--  drivers/media/pci/mgb4/mgb4_core.c | 20
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_pipe.c | 2
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_rpf.c | 10
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_rwpf.c | 8
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_rwpf.h | 4
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_wpf.c | 29
-rw-r--r--  drivers/misc/mei/client.c | 4
-rw-r--r--  drivers/misc/mei/pxp/mei_pxp.c | 3
-rw-r--r--  drivers/mmc/core/block.c | 2
-rw-r--r--  drivers/mmc/core/core.c | 9
-rw-r--r--  drivers/mmc/host/cqhci-core.c | 44
-rw-r--r--  drivers/mmc/host/sdhci-pci-gli.c | 54
-rw-r--r--  drivers/mmc/host/sdhci-sprd.c | 25
-rw-r--r--  drivers/net/arcnet/arcdevice.h | 2
-rw-r--r--  drivers/net/arcnet/com20020-pci.c | 89
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 16
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 26
-rw-r--r--  drivers/net/dsa/mv88e6xxx/pcs-639x.c | 31
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ptp.c | 10
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ptp.h | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 18
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 11
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 16
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 29
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 53
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.h | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 12
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.c | 122
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.c | 20
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c | 11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.c | 30
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mcs.c | 18
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mcs.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h | 31
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 12
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 18
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 120
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 20
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | 127
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.h | 2
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 16
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 16
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 69
-rw-r--r--  drivers/net/ethernet/renesas/rswitch.c | 22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac5.c | 45
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac5.h | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc_core.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 1
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_lib.c | 2
-rw-r--r--  drivers/net/hyperv/Kconfig | 1
-rw-r--r--  drivers/net/netdevsim/bpf.c | 4
-rw-r--r--  drivers/net/netkit.c | 6
-rw-r--r--  drivers/net/usb/r8152.c | 28
-rw-r--r--  drivers/net/veth.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/Kconfig | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/mcu.c | 1
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7925/main.c | 4
-rw-r--r--  drivers/nvme/host/Kconfig | 5
-rw-r--r--  drivers/nvme/host/core.c | 86
-rw-r--r--  drivers/nvme/host/fc.c | 6
-rw-r--r--  drivers/nvme/host/ioctl.c | 21
-rw-r--r--  drivers/nvme/host/nvme.h | 11
-rw-r--r--  drivers/nvme/host/pci.c | 30
-rw-r--r--  drivers/nvme/host/rdma.c | 23
-rw-r--r--  drivers/nvme/host/tcp.c | 27
-rw-r--r--  drivers/nvme/target/Kconfig | 5
-rw-r--r--  drivers/nvme/target/configfs.c | 3
-rw-r--r--  drivers/nvmem/core.c | 6
-rw-r--r--  drivers/of/dynamic.c | 5
-rw-r--r--  drivers/parport/parport_pc.c | 21
-rw-r--r--  drivers/phy/qualcomm/Kconfig | 2
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp-combo.c | 44
-rw-r--r--  drivers/pinctrl/cirrus/Kconfig | 3
-rw-r--r--  drivers/pinctrl/core.c | 6
-rw-r--r--  drivers/pinctrl/nxp/pinctrl-s32cc.c | 4
-rw-r--r--  drivers/pinctrl/pinctrl-cy8c95x0.c | 1
-rw-r--r--  drivers/pinctrl/realtek/pinctrl-rtd.c | 4
-rw-r--r--  drivers/pinctrl/stm32/pinctrl-stm32.c | 13
-rw-r--r--  drivers/platform/mellanox/mlxbf-bootctl.c | 39
-rw-r--r--  drivers/platform/mellanox/mlxbf-pmc.c | 14
-rw-r--r--  drivers/platform/surface/aggregator/core.c | 5
-rw-r--r--  drivers/platform/x86/Kconfig | 2
-rw-r--r--  drivers/platform/x86/asus-nb-wmi.c | 61
-rw-r--r--  drivers/platform/x86/asus-wmi.c | 58
-rw-r--r--  drivers/platform/x86/asus-wmi.h | 7
-rw-r--r--  drivers/platform/x86/wmi.c | 5
-rw-r--r--  drivers/pmdomain/arm/scmi_perf_domain.c | 2
-rw-r--r--  drivers/pmdomain/qcom/rpmpd.c | 1
-rw-r--r--  drivers/powercap/dtpm_cpu.c | 23
-rw-r--r--  drivers/powercap/dtpm_devfreq.c | 11
-rw-r--r--  drivers/pwm/pwm-bcm2835.c | 2
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 1
-rw-r--r--  drivers/scsi/sd.c | 9
-rw-r--r--  drivers/soc/qcom/Kconfig | 1
-rw-r--r--  drivers/soc/qcom/pmic_glink_altmode.c | 33
-rw-r--r--  drivers/staging/sm750fb/sm750.c | 65
-rw-r--r--  drivers/tee/optee/device.c | 17
-rw-r--r--  drivers/tty/serial/8250/8250_dw.c | 1
-rw-r--r--  drivers/tty/serial/8250/8250_early.c | 1
-rw-r--r--  drivers/tty/serial/8250/8250_omap.c | 14
-rw-r--r--  drivers/tty/serial/amba-pl011.c | 112
-rw-r--r--  drivers/tty/serial/ma35d1_serial.c | 10
-rw-r--r--  drivers/tty/serial/sc16is7xx.c | 12
-rw-r--r--  drivers/ufs/core/ufshcd.c | 13
-rw-r--r--  drivers/usb/gadget/function/f_hid.c | 7
-rw-r--r--  drivers/usb/gadget/udc/core.c | 4
-rw-r--r--  drivers/usb/host/xhci-pci.c | 2
-rw-r--r--  drivers/usb/typec/class.c | 5
-rw-r--r--  drivers/usb/typec/mux/Kconfig | 2
-rw-r--r--  drivers/usb/typec/mux/nb7vpq904m.c | 44
-rw-r--r--  drivers/usb/typec/tcpm/Kconfig | 1
-rw-r--r--  drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c | 41
-rw-r--r--  drivers/vdpa/mlx5/net/mlx5_vnet.c | 7
-rw-r--r--  drivers/vdpa/pds/debugfs.c | 2
-rw-r--r--  drivers/vdpa/pds/vdpa_dev.c | 7
-rw-r--r--  drivers/vfio/pci/pds/pci_drv.c | 4
-rw-r--r--  drivers/vfio/pci/pds/vfio_dev.c | 30
-rw-r--r--  drivers/vfio/pci/pds/vfio_dev.h | 2
-rw-r--r--  drivers/video/fbdev/Kconfig | 50
-rw-r--r--  drivers/video/fbdev/acornfb.c | 2
-rw-r--r--  drivers/video/fbdev/amba-clcd.c | 2
-rw-r--r--  drivers/video/fbdev/arcfb.c | 114
-rw-r--r--  drivers/video/fbdev/au1100fb.c | 2
-rw-r--r--  drivers/video/fbdev/au1200fb.c | 11
-rw-r--r--  drivers/video/fbdev/clps711x-fb.c | 4
-rw-r--r--  drivers/video/fbdev/core/Kconfig | 7
-rw-r--r--  drivers/video/fbdev/core/Makefile | 2
-rw-r--r--  drivers/video/fbdev/core/cfbcopyarea.c | 3
-rw-r--r--  drivers/video/fbdev/core/cfbfillrect.c | 3
-rw-r--r--  drivers/video/fbdev/core/cfbimgblt.c | 3
-rw-r--r--  drivers/video/fbdev/core/fb_chrdev.c | 68
-rw-r--r--  drivers/video/fbdev/core/fb_defio.c | 2
-rw-r--r--  drivers/video/fbdev/core/fb_io_fops.c | 36
-rw-r--r--  drivers/video/fbdev/core/fb_sys_fops.c | 6
-rw-r--r--  drivers/video/fbdev/core/syscopyarea.c | 3
-rw-r--r--  drivers/video/fbdev/core/sysfillrect.c | 3
-rw-r--r--  drivers/video/fbdev/core/sysimgblt.c | 3
-rw-r--r--  drivers/video/fbdev/cyber2000fb.c | 9
-rw-r--r--  drivers/video/fbdev/ep93xx-fb.c | 2
-rw-r--r--  drivers/video/fbdev/gbefb.c | 2
-rw-r--r--  drivers/video/fbdev/omap/omapfb_main.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/omapfb-main.c | 2
-rw-r--r--  drivers/video/fbdev/ps3fb.c | 11
-rw-r--r--  drivers/video/fbdev/sa1100fb.c | 2
-rw-r--r--  drivers/video/fbdev/sbuslib.c | 5
-rw-r--r--  drivers/video/fbdev/sh_mobile_lcdcfb.c | 16
-rw-r--r--  drivers/video/fbdev/sm712fb.c | 6
-rw-r--r--  drivers/video/fbdev/smscufx.c | 2
-rw-r--r--  drivers/video/fbdev/udlfb.c | 2
-rw-r--r--  drivers/video/fbdev/vermilion/vermilion.c | 2
-rw-r--r--  drivers/video/fbdev/vfb.c | 10
-rw-r--r--  drivers/video/fbdev/vt8500lcdfb.c | 4
-rw-r--r--  drivers/video/fbdev/wm8505fb.c | 2
-rw-r--r--  drivers/xen/events/events_base.c | 4
-rw-r--r--  fs/Kconfig | 1
-rw-r--r--  fs/bcachefs/Kconfig | 12
-rw-r--r--  fs/bcachefs/alloc_foreground.c | 30
-rw-r--r--  fs/bcachefs/bcachefs.h | 4
-rw-r--r--  fs/bcachefs/bcachefs_format.h | 8
-rw-r--r--  fs/bcachefs/btree_gc.c | 9
-rw-r--r--  fs/bcachefs/btree_io.c | 7
-rw-r--r--  fs/bcachefs/btree_iter.c | 6
-rw-r--r--  fs/bcachefs/btree_journal_iter.c | 18
-rw-r--r--  fs/bcachefs/btree_journal_iter.h | 10
-rw-r--r--  fs/bcachefs/btree_update_interior.c | 14
-rw-r--r--  fs/bcachefs/buckets.c | 10
-rw-r--r--  fs/bcachefs/compress.c | 16
-rw-r--r--  fs/bcachefs/data_update.c | 92
-rw-r--r--  fs/bcachefs/data_update.h | 9
-rw-r--r--  fs/bcachefs/errcode.h | 3
-rw-r--r--  fs/bcachefs/extents.c | 30
-rw-r--r--  fs/bcachefs/fs-io-direct.c | 8
-rw-r--r--  fs/bcachefs/fs.c | 3
-rw-r--r--  fs/bcachefs/io_read.c | 2
-rw-r--r--  fs/bcachefs/io_write.c | 14
-rw-r--r--  fs/bcachefs/io_write.h | 3
-rw-r--r--  fs/bcachefs/journal.c | 2
-rw-r--r--  fs/bcachefs/journal.h | 4
-rw-r--r--  fs/bcachefs/journal_io.c | 29
-rw-r--r--  fs/bcachefs/journal_io.h | 2
-rw-r--r--  fs/bcachefs/move.c | 126
-rw-r--r--  fs/bcachefs/move.h | 19
-rw-r--r--  fs/bcachefs/movinggc.c | 2
-rw-r--r--  fs/bcachefs/recovery.c | 11
-rw-r--r--  fs/bcachefs/replicas.c | 69
-rw-r--r--  fs/bcachefs/replicas.h | 2
-rw-r--r--  fs/bcachefs/snapshot.c | 2
-rw-r--r--  fs/bcachefs/super-io.c | 5
-rw-r--r--  fs/bcachefs/super.c | 34
-rw-r--r--  fs/bcachefs/super_types.h | 1
-rw-r--r--  fs/bcachefs/trace.h | 6
-rw-r--r--  fs/btrfs/disk-io.c | 1
-rw-r--r--  fs/btrfs/extent_io.c | 11
-rw-r--r--  fs/btrfs/ioctl.c | 1
-rw-r--r--  fs/btrfs/ref-verify.c | 2
-rw-r--r--  fs/btrfs/send.c | 2
-rw-r--r--  fs/btrfs/super.c | 5
-rw-r--r--  fs/btrfs/transaction.c | 2
-rw-r--r--  fs/btrfs/tree-checker.c | 39
-rw-r--r--  fs/btrfs/volumes.c | 9
-rw-r--r--  fs/debugfs/file.c | 90
-rw-r--r--  fs/debugfs/inode.c | 64
-rw-r--r--  fs/debugfs/internal.h | 15
-rw-r--r--  fs/ext2/file.c | 1
-rw-r--r--  fs/nilfs2/sufile.c | 42
-rw-r--r--  fs/nilfs2/the_nilfs.c | 6
-rw-r--r--  fs/proc/task_mmu.c | 26
-rw-r--r--  fs/smb/client/cifsfs.c | 174
-rw-r--r--  fs/smb/client/cifspdu.h | 24
-rw-r--r--  fs/smb/client/cifssmb.c | 6
-rw-r--r--  fs/smb/client/connect.c | 35
-rw-r--r--  fs/smb/client/inode.c | 4
-rw-r--r--  fs/smb/client/smb2ops.c | 15
-rw-r--r--  fs/smb/client/smb2pdu.c | 42
-rw-r--r--  fs/smb/client/smb2pdu.h | 16
-rw-r--r--  fs/smb/common/smb2pdu.h | 17
-rw-r--r--  fs/smb/server/ksmbd_work.c | 10
-rw-r--r--  fs/smb/server/oplock.c | 3
-rw-r--r--  fs/smb/server/smb2pdu.c | 162
-rw-r--r--  fs/smb/server/smbacl.c | 7
-rw-r--r--  fs/smb/server/smbacl.h | 2
-rw-r--r--  fs/smb/server/vfs.c | 70
-rw-r--r--  fs/smb/server/vfs.h | 10
-rw-r--r--  fs/smb/server/vfs_cache.c | 33
-rw-r--r--  fs/smb/server/vfs_cache.h | 6
-rw-r--r--  fs/squashfs/block.c | 2
-rw-r--r--  include/drm/bridge/aux-bridge.h | 37
-rw-r--r--  include/drm/display/drm_dp.h | 3
-rw-r--r--  include/drm/drm_atomic_helper.h | 2
-rw-r--r--  include/drm/drm_auth.h | 22
-rw-r--r--  include/drm/drm_bridge.h | 4
-rw-r--r--  include/drm/drm_device.h | 71
-rw-r--r--  include/drm/drm_drv.h | 28
-rw-r--r--  include/drm/drm_encoder.h | 16
-rw-r--r--  include/drm/drm_file.h | 17
-rw-r--r--  include/drm/drm_framebuffer.h | 12
-rw-r--r--  include/drm/drm_gpuvm.h | 27
-rw-r--r--  include/drm/drm_ioctl.h | 11
-rw-r--r--  include/drm/drm_legacy.h | 331
-rw-r--r--  include/drm/drm_mipi_dsi.h | 2
-rw-r--r--  include/drm/drm_modeset_helper_vtables.h | 10
-rw-r--r--  include/drm/drm_plane.h | 24
-rw-r--r--  include/drm/drm_plane_helper.h | 2
-rw-r--r--  include/drm/drm_prime.h | 7
-rw-r--r--  include/drm/gpu_scheduler.h | 6
-rw-r--r--  include/drm/i915_pciids.h | 3
-rw-r--r--  include/linux/acpi.h | 22
-rw-r--r--  include/linux/amd-pstate.h | 4
-rw-r--r--  include/linux/arm_ffa.h | 2
-rw-r--r--  include/linux/blk_types.h | 4
-rw-r--r--  include/linux/bpf.h | 3
-rw-r--r--  include/linux/closure.h | 9
-rw-r--r--  include/linux/debugfs.h | 19
-rw-r--r--  include/linux/dma-buf.h | 11
-rw-r--r--  include/linux/dma-fence.h | 15
-rw-r--r--  include/linux/fb.h | 16
-rw-r--r--  include/linux/fw_table.h | 3
-rw-r--r--  include/linux/highmem.h | 2
-rw-r--r--  include/linux/hugetlb.h | 5
-rw-r--r--  include/linux/ieee80211.h | 4
-rw-r--r--  include/linux/io_uring_types.h | 3
-rw-r--r--  include/linux/iommu.h | 1
-rw-r--r--  include/linux/kprobes.h | 13
-rw-r--r--  include/linux/platform_data/x86/asus-wmi.h | 3
-rw-r--r--  include/linux/rethook.h | 7
-rw-r--r--  include/linux/skmsg.h | 1
-rw-r--r--  include/linux/stmmac.h | 1
-rw-r--r--  include/linux/tcp.h | 8
-rw-r--r--  include/linux/units.h | 1
-rw-r--r--  include/linux/usb/r8152.h | 1
-rw-r--r--  include/linux/vfio.h | 8
-rw-r--r--  include/net/af_unix.h | 1
-rw-r--r--  include/net/cfg80211.h | 46
-rw-r--r--  include/net/genetlink.h | 2
-rw-r--r--  include/net/neighbour.h | 2
-rw-r--r--  include/net/tcp.h | 9
-rw-r--r--  include/net/tcp_ao.h | 6
-rw-r--r--  include/rdma/ib_umem.h | 9
-rw-r--r--  include/rdma/ib_verbs.h | 1
-rw-r--r--  include/scsi/scsi_device.h | 12
-rw-r--r--  include/sound/cs35l41.h | 2
-rw-r--r--  include/uapi/drm/drm.h | 52
-rw-r--r--  include/uapi/drm/drm_mode.h | 9
-rw-r--r--  include/uapi/drm/pvr_drm.h | 10
-rw-r--r--  include/uapi/drm/v3d_drm.h | 240
-rw-r--r--  include/uapi/linux/stddef.h | 2
-rw-r--r--  include/uapi/linux/sync_file.h | 22
-rw-r--r--  include/uapi/linux/v4l2-subdev.h | 2
-rw-r--r--  io_uring/cancel.c | 11
-rw-r--r--  io_uring/io_uring.c | 104
-rw-r--r--  io_uring/io_uring.h | 3
-rw-r--r--  io_uring/kbuf.c | 177
-rw-r--r--  io_uring/kbuf.h | 5
-rw-r--r--  io_uring/rsrc.h | 7
-rw-r--r--  kernel/Kconfig.kexec | 1
-rw-r--r--  kernel/bpf/arraymap.c | 58
-rw-r--r--  kernel/bpf/core.c | 12
-rw-r--r--  kernel/bpf/memalloc.c | 2
-rw-r--r--  kernel/cgroup/legacy_freezer.c | 8
-rw-r--r--  kernel/events/core.c | 61
-rw-r--r--  kernel/freezer.c | 2
-rw-r--r--  kernel/kprobes.c | 4
-rw-r--r--  kernel/trace/rethook.c | 23
-rw-r--r--  kernel/trace/ring_buffer.c | 23
-rw-r--r--  kernel/trace/trace.c | 158
-rw-r--r--  kernel/workqueue.c | 22
-rw-r--r--  lib/closure.c | 5
-rw-r--r--  lib/fw_table.c | 2
-rw-r--r--  lib/group_cpus.c | 22
-rw-r--r--  lib/kunit/kunit-test.c | 2
-rw-r--r--  lib/kunit/test.c | 42
-rw-r--r--  lib/objpool.c | 17
-rw-r--r--  mm/Kconfig | 16
-rw-r--r--  mm/damon/core.c | 1
-rw-r--r--  mm/damon/sysfs-schemes.c | 49
-rw-r--r--  mm/filemap.c | 2
-rw-r--r--  mm/hugetlb.c | 7
-rw-r--r--  mm/kmemleak.c | 40
-rw-r--r--  mm/madvise.c | 11
-rw-r--r--  mm/memcontrol.c | 2
-rw-r--r--  mm/memory.c | 1
-rw-r--r--  mm/memory_hotplug.c | 15
-rw-r--r--  net/core/drop_monitor.c | 4
-rw-r--r--  net/core/filter.c | 19
-rw-r--r--  net/core/scm.c | 6
-rw-r--r--  net/core/skmsg.c | 2
-rw-r--r--  net/ethtool/netlink.c | 1
-rw-r--r--  net/ipv4/igmp.c | 6
-rw-r--r--  net/ipv4/ip_gre.c | 11
-rw-r--r--  net/ipv4/tcp.c | 28
-rw-r--r--  net/ipv4/tcp_ao.c | 17
-rw-r--r--  net/ipv4/tcp_input.c | 11
-rw-r--r--  net/ipv4/tcp_ipv4.c | 4
-rw-r--r--  net/ipv4/tcp_minisocks.c | 2
-rw-r--r--  net/ipv4/tcp_output.c | 15
-rw-r--r--  net/ipv6/ip6_fib.c | 6
-rw-r--r--  net/ipv6/tcp_ipv6.c | 2
-rw-r--r--  net/mac80211/Kconfig | 2
-rw-r--r--  net/mac80211/debugfs_netdev.c | 150
-rw-r--r--  net/mac80211/debugfs_sta.c | 74
-rw-r--r--  net/mac80211/driver-ops.h | 9
-rw-r--r--  net/mac80211/ht.c | 1
-rw-r--r--  net/mptcp/options.c | 1
-rw-r--r--  net/netfilter/nf_bpf_link.c | 10
-rw-r--r--  net/netfilter/nf_tables_api.c | 5
-rw-r--r--  net/netfilter/nft_dynset.c | 13
-rw-r--r--  net/netfilter/nft_exthdr.c | 4
-rw-r--r--  net/netfilter/nft_fib.c | 8
-rw-r--r--  net/netfilter/nft_set_pipapo.c | 3
-rw-r--r--  net/netfilter/xt_owner.c | 16
-rw-r--r--  net/netlink/genetlink.c | 3
-rw-r--r--  net/packet/af_packet.c | 16
-rw-r--r--  net/packet/internal.h | 2
-rw-r--r--  net/psample/psample.c | 3
-rw-r--r--  net/smc/af_smc.c | 4
-rw-r--r--  net/smc/smc_clc.c | 9
-rw-r--r--  net/smc/smc_clc.h | 4
-rw-r--r--  net/tls/tls_sw.c | 2
-rw-r--r--  net/unix/af_unix.c | 2
-rw-r--r--  net/unix/unix_bpf.c | 5
-rw-r--r--  net/vmw_vsock/virtio_transport_common.c | 3
-rw-r--r--  net/wireless/core.c | 6
-rw-r--r--  net/wireless/core.h | 1
-rw-r--r--  net/wireless/debugfs.c | 160
-rw-r--r--  net/wireless/nl80211.c | 55
-rw-r--r--  net/xdp/xsk.c | 5
-rwxr-xr-x  scripts/checkstack.pl | 8
-rwxr-xr-x  scripts/dtc/dt-extract-compatibles | 14
-rw-r--r--  scripts/gcc-plugins/randomize_layout_plugin.c | 3
-rw-r--r--  scripts/gdb/linux/device.py | 16
-rw-r--r--  scripts/gdb/linux/tasks.py | 18
-rw-r--r--  sound/core/pcm.c | 1
-rw-r--r--  sound/drivers/pcmtest.c | 13
-rw-r--r--  sound/hda/intel-nhlt.c | 33
-rw-r--r--  sound/pci/hda/cs35l41_hda.c | 28
-rw-r--r--  sound/pci/hda/cs35l56_hda_spi.c | 4
-rw-r--r--  sound/pci/hda/hda_intel.c | 2
-rw-r--r--  sound/pci/hda/patch_realtek.c | 28
-rw-r--r--  sound/soc/amd/acp-config.c | 14
-rw-r--r--  sound/soc/amd/yc/acp6x-mach.c | 21
-rw-r--r--  sound/soc/codecs/cs35l41-lib.c | 6
-rw-r--r--  sound/soc/codecs/cs35l41.c | 4
-rw-r--r--  sound/soc/codecs/cs43130.c | 6
-rw-r--r--  sound/soc/codecs/da7219-aad.c | 2
-rw-r--r--  sound/soc/codecs/hdac_hda.c | 23
-rw-r--r--  sound/soc/codecs/lpass-tx-macro.c | 5
-rw-r--r--  sound/soc/codecs/nau8822.c | 9
-rw-r--r--  sound/soc/codecs/rt5645.c | 10
-rw-r--r--  sound/soc/codecs/wm8974.c | 6
-rw-r--r--  sound/soc/codecs/wm_adsp.c | 8
-rw-r--r--  sound/soc/fsl/Kconfig | 1
-rw-r--r--  sound/soc/fsl/fsl_sai.c | 21
-rw-r--r--  sound/soc/fsl/fsl_xcvr.c | 14
-rw-r--r--  sound/soc/intel/boards/skl_hda_dsp_generic.c | 2
-rw-r--r--  sound/soc/intel/boards/sof_sdw.c | 17
-rw-r--r--  sound/soc/intel/skylake/skl-pcm.c | 9
-rw-r--r--  sound/soc/intel/skylake/skl-sst-ipc.c | 4
-rw-r--r--  sound/soc/qcom/sc8280xp.c | 17
-rw-r--r--  sound/soc/soc-ops.c | 2
-rw-r--r--  sound/soc/soc-pcm.c | 11
-rw-r--r--  sound/soc/sof/ipc3-topology.c | 2
-rw-r--r--  sound/soc/sof/ipc4-control.c | 20
-rw-r--r--  sound/soc/sof/ipc4-topology.c | 61
-rw-r--r--  sound/soc/sof/ipc4-topology.h | 34
-rw-r--r--  sound/soc/sof/mediatek/mt8186/mt8186.c | 3
-rw-r--r--  sound/soc/sof/sof-audio.c | 65
-rw-r--r--  sound/soc/sof/sof-audio.h | 2
-rw-r--r--  sound/soc/sof/topology.c | 4
-rw-r--r--  sound/usb/mixer_quirks.c | 30
-rw-r--r--  tools/arch/arm64/include/asm/cputype.h | 5
-rw-r--r--  tools/arch/arm64/include/uapi/asm/kvm.h | 32
-rw-r--r--  tools/arch/arm64/include/uapi/asm/perf_regs.h | 10
-rw-r--r--  tools/arch/arm64/tools/Makefile | 2
-rw-r--r--  tools/arch/s390/include/uapi/asm/kvm.h | 16
-rw-r--r--  tools/arch/x86/include/asm/cpufeatures.h | 16
-rw-r--r--  tools/arch/x86/include/asm/disabled-features.h | 16
-rw-r--r--  tools/arch/x86/include/asm/msr-index.h | 23
-rw-r--r--  tools/arch/x86/include/uapi/asm/prctl.h | 12
-rw-r--r--  tools/include/asm-generic/unaligned.h | 1
-rw-r--r--  tools/include/uapi/asm-generic/unistd.h | 12
-rw-r--r--  tools/include/uapi/drm/drm.h | 20
-rw-r--r--  tools/include/uapi/drm/i915_drm.h | 8
-rw-r--r--  tools/include/uapi/linux/fscrypt.h | 3
-rw-r--r--  tools/include/uapi/linux/kvm.h | 24
-rw-r--r--  tools/include/uapi/linux/mount.h | 3
-rw-r--r--  tools/include/uapi/linux/vhost.h | 8
-rw-r--r--  tools/net/ynl/generated/devlink-user.c | 87
-rw-r--r--  tools/net/ynl/generated/ethtool-user.c | 51
-rw-r--r--  tools/net/ynl/generated/fou-user.c | 6
-rw-r--r--  tools/net/ynl/generated/handshake-user.c | 3
-rwxr-xr-x  tools/net/ynl/ynl-gen-c.py | 10
-rw-r--r--  tools/perf/MANIFEST | 2
-rw-r--r--  tools/perf/Makefile.perf | 24
-rw-r--r--  tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl | 4
-rw-r--r--  tools/perf/arch/powerpc/entry/syscalls/syscall.tbl | 4
-rw-r--r--  tools/perf/arch/s390/entry/syscalls/syscall.tbl | 4
-rw-r--r--  tools/perf/arch/x86/entry/syscalls/syscall_64.tbl | 3
-rw-r--r--  tools/perf/builtin-kwork.c | 2
-rw-r--r--  tools/perf/builtin-list.c | 6
-rw-r--r--  tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json | 2
-rw-r--r--  tools/perf/trace/beauty/include/linux/socket.h | 1
-rw-r--r--  tools/perf/util/Build | 2
-rw-r--r--  tools/perf/util/bpf_lock_contention.c | 3
-rw-r--r--  tools/perf/util/metricgroup.c | 2
-rw-r--r--  tools/testing/nvdimm/test/ndtest.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/sockmap_listen.c | 51
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/tailcalls.c | 84
-rw-r--r--  tools/testing/selftests/bpf/progs/tailcall_poke.c | 32
-rw-r--r--  tools/testing/selftests/bpf/progs/test_sockmap_listen.c | 7
-rw-r--r--  tools/testing/selftests/iommu/iommufd_utils.h | 13
-rw-r--r--  tools/testing/selftests/kvm/Makefile | 7
-rw-r--r--  tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c | 2
-rw-r--r--  tools/testing/selftests/mm/Makefile | 4
-rw-r--r--  tools/testing/selftests/mm/pagemap_ioctl.c | 9
-rw-r--r--  tools/testing/selftests/net/af_unix/diag_uid.c | 1
-rw-r--r--  tools/testing/selftests/net/cmsg_sender.c | 2
-rw-r--r--  tools/testing/selftests/net/ipsec.c | 4
-rw-r--r--  tools/testing/selftests/net/mptcp/mptcp_connect.c | 11
-rw-r--r--  tools/testing/selftests/net/mptcp/mptcp_inq.c | 11
-rw-r--r--  virt/kvm/kvm_main.c | 18
996 files changed, 15248 insertions, 15053 deletions
diff --git a/.mailmap b/.mailmap
index 43031441b2d9..19eb49e55836 100644
--- a/.mailmap
+++ b/.mailmap
@@ -117,6 +117,7 @@ Changbin Du <[email protected]> <[email protected]>
diff --git a/CREDITS b/CREDITS
index f33a33fd2371..81845c39e3cf 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2944,6 +2944,14 @@ D: IPX development and support
N: Venkatesh Pallipadi (Venki)
D: x86/HPET
+N: Antti Palosaari
+D: Various DVB drivers
+W: https://palosaari.fi/linux/
+S: Yliopistokatu 1 D 513
+S: FI-90570 Oulu
+S: FINLAND
+
N: Kyungmin Park
D: Samsung S5Pv210 and Exynos4210 mobile platforms
diff --git a/Documentation/ABI/testing/sysfs-bus-optee-devices b/Documentation/ABI/testing/sysfs-bus-optee-devices
index 0f58701367b6..af31e5a22d89 100644
--- a/Documentation/ABI/testing/sysfs-bus-optee-devices
+++ b/Documentation/ABI/testing/sysfs-bus-optee-devices
@@ -6,3 +6,12 @@ Description:
OP-TEE bus provides reference to registered drivers under this directory. The <uuid>
matches Trusted Application (TA) driver and corresponding TA in secure OS. Drivers
are free to create needed API under optee-ta-<uuid> directory.
+
+What: /sys/bus/tee/devices/optee-ta-<uuid>/need_supplicant
+Date: November 2023
+KernelVersion: 6.7
+Description:
+ Allows one to distinguish whether an OP-TEE based TA/device requires user-space
+ tee-supplicant to function properly or not. This attribute will be present for
+ devices which depend on tee-supplicant to be running.
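As a rough illustration (not part of the patch), userspace can key off the mere
presence of the new attribute; the UUID in the path below is a placeholder:

	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Hypothetical path; substitute the TA's real UUID. */
		const char *attr =
			"/sys/bus/tee/devices/optee-ta-<uuid>/need_supplicant";

		/* The attribute's existence alone signals the dependency. */
		if (access(attr, F_OK) == 0)
			printf("TA requires tee-supplicant to be running\n");
		else
			printf("TA works without tee-supplicant\n");
		return 0;
	}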
diff --git a/Documentation/ABI/testing/sysfs-class-led b/Documentation/ABI/testing/sysfs-class-led
index b2ff0012c0f2..2e24ac3bd7ef 100644
--- a/Documentation/ABI/testing/sysfs-class-led
+++ b/Documentation/ABI/testing/sysfs-class-led
@@ -59,15 +59,6 @@ Description:
brightness. Reading this file when no hw brightness change
event has happened will return an ENODATA error.
-What: /sys/class/leds/<led>/color
-Date: June 2023
-KernelVersion: 6.5
-Description:
- Color of the LED.
-
- This is a read-only file. Reading this file returns the color
- of the LED as a string (e.g: "red", "green", "multicolor").
-
What: /sys/class/leds/<led>/trigger
Date: March 2006
KernelVersion: 2.6.17
diff --git a/Documentation/accel/qaic/qaic.rst b/Documentation/accel/qaic/qaic.rst
index f81020736ebf..efb7771273bb 100644
--- a/Documentation/accel/qaic/qaic.rst
+++ b/Documentation/accel/qaic/qaic.rst
@@ -93,8 +93,15 @@ commands (does not impact QAIC).
uAPI
====
+QAIC creates an accel device per physical PCIe device. This accel device exists
+for as long as the PCIe device is known to Linux.
+
+The PCIe device may not be in the state to accept requests from userspace at
+all times. QAIC will trigger KOBJ_ONLINE/OFFLINE uevents to advertise when the
+device can accept requests (ONLINE) and when the device is no longer accepting
+requests (OFFLINE) because of a reset or other state transition.
+
QAIC defines a number of driver specific IOCTLs as part of the userspace API.
-This section describes those APIs.
DRM_IOCTL_QAIC_MANAGE
This IOCTL allows userspace to send a NNC request to the QSM. The call will
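A hedged sketch of how userspace could watch for the ONLINE/OFFLINE uevents
described above, using libudev; the "accel" subsystem filter is an assumption
based on QAIC exposing accel devices:

	#include <libudev.h>
	#include <poll.h>
	#include <stdio.h>

	int main(void)
	{
		struct udev *udev = udev_new();
		struct udev_monitor *mon =
			udev_monitor_new_from_netlink(udev, "udev");
		struct pollfd pfd;

		/* Assumed subsystem name for QAIC accel devices. */
		udev_monitor_filter_add_match_subsystem_devtype(mon, "accel", NULL);
		udev_monitor_enable_receiving(mon);
		pfd.fd = udev_monitor_get_fd(mon);
		pfd.events = POLLIN;

		for (;;) {
			struct udev_device *dev;

			if (poll(&pfd, 1, -1) <= 0)
				continue;
			dev = udev_monitor_receive_device(mon);
			if (!dev)
				continue;
			/* KOBJ_ONLINE/KOBJ_OFFLINE arrive as "online"/"offline". */
			printf("%s: %s\n", udev_device_get_sysname(dev),
			       udev_device_get_action(dev));
			udev_device_unref(dev);
		}
	}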
diff --git a/Documentation/core-api/pin_user_pages.rst b/Documentation/core-api/pin_user_pages.rst
index d3c1f6d8c0e0..6b5f7e6e7155 100644
--- a/Documentation/core-api/pin_user_pages.rst
+++ b/Documentation/core-api/pin_user_pages.rst
@@ -153,6 +153,8 @@ NOTE: Some pages, such as DAX pages, cannot be pinned with longterm pins. That's
because DAX pages do not have a separate page cache, and so "pinning" implies
locking down file system blocks, which is not (yet) supported in that way.
+.. _mmu-notifier-registration-case:
+
CASE 3: MMU notifier registration, with or without page faulting hardware
-------------------------------------------------------------------------
Device drivers can pin pages via get_user_pages*(), and register for mmu
diff --git a/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml b/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml
index 987aa83c2649..df20a3c9c744 100644
--- a/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml
@@ -9,6 +9,9 @@ title: Analog Devices ADV7533/35 HDMI Encoders
maintainers:
- Laurent Pinchart <[email protected]>
+allOf:
+ - $ref: /schemas/sound/dai-common.yaml#
+
description: |
The ADV7533 and ADV7535 are HDMI audio and video transmitters
compatible with HDMI 1.4 and DVI 1.0. They support color space
@@ -89,6 +92,9 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32
enum: [ 1, 2, 3, 4 ]
+ "#sound-dai-cells":
+ const: 0
+
ports:
description:
The ADV7533/35 has two video ports and one audio port.
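For context, a minimal board-level sketch (not from the patch; the I2C address
and omitted wiring are placeholders) of why the schema now carries
#sound-dai-cells — an audio card can reference the encoder as a DAI:

	hdmi@39 {
		compatible = "adi,adv7533";
		reg = <0x39>;
		/* New property: the bridge exposes a single DAI. */
		#sound-dai-cells = <0>;
		/* video and audio ports omitted for brevity */
	};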
diff --git a/Documentation/devicetree/bindings/display/bridge/lontium,lt8912b.yaml b/Documentation/devicetree/bindings/display/bridge/lontium,lt8912b.yaml
index f201ae4af4fb..2cef25215798 100644
--- a/Documentation/devicetree/bindings/display/bridge/lontium,lt8912b.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/lontium,lt8912b.yaml
@@ -55,6 +55,27 @@ properties:
- port@0
- port@1
+ vcchdmipll-supply:
+ description: A 1.8V supply that powers the HDMI PLL.
+
+ vcchdmitx-supply:
+ description: A 1.8V supply that powers the HDMI TX part.
+
+ vcclvdspll-supply:
+ description: A 1.8V supply that powers the LVDS PLL.
+
+ vcclvdstx-supply:
+ description: A 1.8V supply that powers the LVDS TX part.
+
+ vccmipirx-supply:
+ description: A 1.8V supply that powers the MIPI RX part.
+
+ vccsysclk-supply:
+ description: A 1.8V supply that powers the SYSCLK.
+
+ vdd-supply:
+ description: A 1.8V supply that powers the digital part.
+
required:
- compatible
- reg
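An illustrative consumer node (not from the patch; the regulator phandle is a
board-specific placeholder) wiring up the newly documented 1.8V supplies:

	hdmi-bridge@48 {
		compatible = "lontium,lt8912b";
		reg = <0x48>;
		vcchdmipll-supply = <&reg_1v8>;
		vcchdmitx-supply = <&reg_1v8>;
		vcclvdspll-supply = <&reg_1v8>;
		vcclvdstx-supply = <&reg_1v8>;
		vccmipirx-supply = <&reg_1v8>;
		vccsysclk-supply = <&reg_1v8>;
		vdd-supply = <&reg_1v8>;
		/* reset-gpios and ports omitted for brevity */
	};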
diff --git a/Documentation/devicetree/bindings/display/fsl,lcdif.yaml b/Documentation/devicetree/bindings/display/fsl,lcdif.yaml
index fc11ab5fc465..1c2be8d6f633 100644
--- a/Documentation/devicetree/bindings/display/fsl,lcdif.yaml
+++ b/Documentation/devicetree/bindings/display/fsl,lcdif.yaml
@@ -51,7 +51,10 @@ properties:
minItems: 1
interrupts:
- maxItems: 1
+ items:
+ - description: LCDIF DMA interrupt
+ - description: LCDIF Error interrupt
+ minItems: 1
power-domains:
maxItems: 1
@@ -131,6 +134,21 @@ allOf:
then:
required:
- power-domains
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - fsl,imx23-lcdif
+ then:
+ properties:
+ interrupts:
+ minItems: 2
+ maxItems: 2
+ else:
+ properties:
+ interrupts:
+ maxItems: 1
examples:
- |
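The binding's own example is truncated in this capture; separately, a hedged
sketch of what the new conditional means in practice — an fsl,imx23-lcdif node
must list both interrupts, while other variants keep a single one (the
interrupt numbers below are invented):

	lcdif@80030000 {
		compatible = "fsl,imx23-lcdif";
		reg = <0x80030000 0x2000>;
		interrupts = <46>, <45>; /* DMA, then error */
		clocks = <&clks 38>;
	};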
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.yaml
index 537e5304b730..ed24b617090b 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.yaml
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.yaml
@@ -10,7 +10,6 @@ maintainers:
- Chun-Kuang Hu <[email protected]>
- Philipp Zabel <[email protected]>
- Jitao Shi <[email protected]>
- - Xinlei Lee <[email protected]>
description: |
The MediaTek DSI function block is a sink of the display subsystem and can
diff --git a/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
index ffb35288ffbb..916bb7f94206 100644
--- a/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
+++ b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
@@ -23,6 +23,7 @@ properties:
items:
- enum:
- hannstar,hsd060bhw4
+ - powkiddy,x55-panel
- const: himax,hx8394
reg: true
@@ -31,6 +32,8 @@ properties:
backlight: true
+ rotation: true
+
port: true
vcc-supply:
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml
index e7ab6224b52e..b1e624be3e33 100644
--- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml
+++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml
@@ -16,6 +16,7 @@ properties:
compatible:
items:
- enum:
+ - ampire,am8001280g
- bananapi,lhr050h41
- feixin,k101-im2byl02
- tdo,tl050hdv35
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index 3ec9ee95045f..2021aa82871a 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -73,6 +73,8 @@ properties:
- auo,t215hvn01
# Shanghai AVIC Optoelectronics 7" 1024x600 color TFT-LCD panel
- avic,tm070ddh03
+ # BOE BP101WX1-100 10.1" WXGA (1280x800) LVDS panel
+ - boe,bp101wx1-100
# BOE EV121WXM-N10-1850 12.1" WXGA (1280x800) TFT LCD panel
- boe,ev121wxm-n10-1850
# BOE HV070WSA-100 7.01" WSVGA TFT LCD panel
@@ -144,6 +146,8 @@ properties:
- edt,etmv570g2dhu
# E Ink VB3300-KCA
- eink,vb3300-kca
+ # Evervision Electronics Co. Ltd. VGG644804 5.7" VGA TFT LCD Panel
+ - evervision,vgg644804
# Evervision Electronics Co. Ltd. VGG804821 5.0" WVGA TFT LCD Panel
- evervision,vgg804821
# Foxlink Group 5" WVGA TFT LCD panel
diff --git a/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
index ae09cd3cbce1..b6767ef0d24d 100644
--- a/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
+++ b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
@@ -23,6 +23,7 @@ properties:
compatible:
enum:
- ti,am625-dss
+ - ti,am62a7-dss
- ti,am65x-dss
reg:
@@ -87,6 +88,7 @@ properties:
For AM65x DSS, the OLDI output port node from video port 1.
For AM625 DSS, the internal DPI output port node from video
port 1.
+ For AM62A7 DSS, the port is tied off inside the SoC.
port@1:
$ref: /schemas/graph.yaml#/properties/port
@@ -108,6 +110,18 @@ properties:
Input memory (from main memory to dispc) bandwidth limit in
bytes per second
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: ti,am62a7-dss
+ then:
+ properties:
+ ports:
+ properties:
+ port@0: false
+
required:
- compatible
- reg
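Given the conditional above, a hypothetical AM62A7 node would describe only
port@1, since port@0 is tied off inside the SoC (the unit address and phandles
below are placeholders, and other required properties are omitted):

	dss: dss@30200000 {
		compatible = "ti,am62a7-dss";
		/* reg, clocks, interrupts, etc. omitted for brevity */
		ports {
			#address-cells = <1>;
			#size-cells = <0>;
			port@1 {
				reg = <1>;
				dpi1_out: endpoint {
					remote-endpoint = <&display_in>;
				};
			};
		};
	};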
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml
index 0fae1ef013be..abd4aa335fbc 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml
@@ -29,6 +29,7 @@ properties:
- allwinner,sun50i-a64-mali
- rockchip,rk3036-mali
- rockchip,rk3066-mali
+ - rockchip,rk3128-mali
- rockchip,rk3188-mali
- rockchip,rk3228-mali
- samsung,exynos4210-mali
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qcom,mpm.yaml b/Documentation/devicetree/bindings/interrupt-controller/qcom,mpm.yaml
index 509d20c091af..6a206111d4e0 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/qcom,mpm.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/qcom,mpm.yaml
@@ -62,6 +62,9 @@ properties:
- description: MPM pin number
- description: GIC SPI number for the MPM pin
+ '#power-domain-cells':
+ const: 0
+
required:
- compatible
- reg
@@ -93,4 +96,5 @@ examples:
<86 183>,
<90 260>,
<91 260>;
+ #power-domain-cells = <0>;
};
diff --git a/Documentation/devicetree/bindings/perf/riscv,pmu.yaml b/Documentation/devicetree/bindings/perf/riscv,pmu.yaml
index c8448de2f2a0..d01c677ad3c7 100644
--- a/Documentation/devicetree/bindings/perf/riscv,pmu.yaml
+++ b/Documentation/devicetree/bindings/perf/riscv,pmu.yaml
@@ -90,7 +90,7 @@ properties:
bitmap of all MHPMCOUNTERx that can monitor the range of events
dependencies:
- "riscv,event-to-mhpmevent": [ "riscv,event-to-mhpmcounters" ]
+ riscv,event-to-mhpmevent: [ "riscv,event-to-mhpmcounters" ]
required:
- compatible
diff --git a/Documentation/devicetree/bindings/pinctrl/nxp,s32g2-siul2-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/nxp,s32g2-siul2-pinctrl.yaml
index d49aafd8c5f4..a24286e4def6 100644
--- a/Documentation/devicetree/bindings/pinctrl/nxp,s32g2-siul2-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/nxp,s32g2-siul2-pinctrl.yaml
@@ -9,7 +9,7 @@ title: NXP S32G2 pin controller
maintainers:
- Ghennadi Procopciuc <[email protected]>
- - Chester Lin <[email protected]>
+ - Chester Lin <[email protected]>
description: |
S32G2 pinmux is implemented in SIUL2 (System Integration Unit Lite2),
diff --git a/Documentation/devicetree/bindings/pwm/imx-pwm.yaml b/Documentation/devicetree/bindings/pwm/imx-pwm.yaml
index c01dff3b7f84..a84a240a61dc 100644
--- a/Documentation/devicetree/bindings/pwm/imx-pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/imx-pwm.yaml
@@ -14,12 +14,10 @@ allOf:
properties:
"#pwm-cells":
- description: |
- Should be 2 for i.MX1 and 3 for i.MX27 and newer SoCs. See pwm.yaml
- in this directory for a description of the cells format.
- enum:
- - 2
- - 3
+ description:
+ The only third cell flag supported by this binding is
+ PWM_POLARITY_INVERTED. fsl,imx1-pwm does not support this flag.
+ const: 3
compatible:
oneOf:
diff --git a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
index e4fa6a07b4fa..1309bf5ae0cd 100644
--- a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
+++ b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
@@ -233,6 +233,7 @@ allOf:
- rockchip,rk3399-grf
- rockchip,rk3399-pmugrf
- rockchip,rk3568-pmugrf
+ - rockchip,rk3588-pmugrf
- rockchip,rv1108-grf
- rockchip,rv1108-pmugrf
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 270d320407c7..13d3627d8bc0 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -548,6 +548,8 @@ Plane Composition Properties
.. kernel-doc:: drivers/gpu/drm/drm_blend.c
:doc: overview
+.. _damage_tracking_properties:
+
Damage Tracking Properties
--------------------------
@@ -579,6 +581,12 @@ Variable Refresh Properties
.. kernel-doc:: drivers/gpu/drm/drm_connector.c
:doc: Variable refresh properties
+Cursor Hotspot Properties
+---------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_plane.c
+ :doc: hotspot properties
+
Existing KMS Properties
-----------------------
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index acc5901ac840..d55751cad67c 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -466,6 +466,8 @@ DRM MM Range Allocator Function References
.. kernel-doc:: drivers/gpu/drm/drm_mm.c
:export:
+.. _drm_gpuvm:
+
DRM GPUVM
=========
@@ -481,6 +483,8 @@ Split and Merge
.. kernel-doc:: drivers/gpu/drm/drm_gpuvm.c
:doc: Split and Merge
+.. _drm_gpuvm_locking:
+
Locking
-------
diff --git a/Documentation/gpu/drm-vm-bind-locking.rst b/Documentation/gpu/drm-vm-bind-locking.rst
new file mode 100644
index 000000000000..a345aa513d12
--- /dev/null
+++ b/Documentation/gpu/drm-vm-bind-locking.rst
@@ -0,0 +1,582 @@
+.. SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+===============
+VM_BIND locking
+===============
+
+This document attempts to describe what's needed to get VM_BIND locking right,
+including the userptr mmu_notifier locking. It also discusses some
+optimizations that avoid the looping through all userptr mappings and
+external / shared object mappings that the simplest implementation
+requires. In addition, there is a section describing the VM_BIND locking
+required for implementing recoverable pagefaults.
+
+The DRM GPUVM set of helpers
+============================
+
+There is a set of helpers for drivers implementing VM_BIND, and this
+set of helpers implements much, but not all, of the locking described
+in this document. In particular, it is currently lacking a userptr
+implementation. This document does not intend to describe the DRM GPUVM
+implementation in detail, but it is covered in :ref:`its own
+documentation <drm_gpuvm>`. It is highly recommended for any driver
+implementing VM_BIND to use the DRM GPUVM helpers and to extend it if
+common functionality is missing.
+
+Nomenclature
+============
+
+* ``gpu_vm``: Abstraction of a virtual GPU address space with
+ meta-data. Typically one per client (DRM file-private), or one per
+ execution context.
+* ``gpu_vma``: Abstraction of a GPU address range within a gpu_vm with
+ associated meta-data. The backing storage of a gpu_vma can be either
+ a GEM object or anonymous or page-cache pages that are also mapped
+ into the CPU address space of the process.
+* ``gpu_vm_bo``: Abstracts the association of a GEM object and
+ a VM. The GEM object maintains a list of gpu_vm_bos, where each gpu_vm_bo
+ maintains a list of gpu_vmas.
+* ``userptr gpu_vma or just userptr``: A gpu_vma whose backing store
+ is anonymous or page-cache pages as described above.
+* ``revalidating``: Revalidating a gpu_vma means making the latest version
+ of the backing store resident and making sure the gpu_vma's
+ page-table entries point to that backing store.
+* ``dma_fence``: A struct dma_fence that is similar to a struct completion
+ and which tracks GPU activity. When the GPU activity is finished,
+ the dma_fence signals. Please refer to the ``DMA Fences`` section of
+ the :doc:`dma-buf doc </driver-api/dma-buf>`.
+* ``dma_resv``: A struct dma_resv (a.k.a reservation object) that is used
+ to track GPU activity in the form of multiple dma_fences on a
+ gpu_vm or a GEM object. The dma_resv contains an array / list
+ of dma_fences and a lock that needs to be held when adding
+ additional dma_fences to the dma_resv. The lock is of a type that
+ allows deadlock-safe locking of multiple dma_resvs in arbitrary
+ order. Please refer to the ``Reservation Objects`` section of the
+ :doc:`dma-buf doc </driver-api/dma-buf>`.
+* ``exec function``: An exec function is a function that revalidates all
+ affected gpu_vmas, submits a GPU command batch and registers the
+ dma_fence representing the GPU command's activity with all affected
+ dma_resvs. For completeness, although not covered by this document,
+ it's worth mentioning that an exec function may also be the
+ revalidation worker that is used by some drivers in compute /
+ long-running mode.
+* ``local object``: A GEM object which is only mapped within a
+ single VM. Local GEM objects share the gpu_vm's dma_resv.
+* ``external object``: a.k.a. shared object: A GEM object which may be shared
+ by multiple gpu_vms and whose backing storage may be shared with
+ other drivers.
+
+Locks and locking order
+=======================
+
+One of the benefits of VM_BIND is that local GEM objects share the gpu_vm's
+dma_resv object and hence the dma_resv lock. So, even with a huge
+number of local GEM objects, only one lock is needed to make the exec
+sequence atomic.
+
+The following locks and locking orders are used:
+
+* The ``gpu_vm->lock`` (optionally an rwsem). Protects the gpu_vm's
+ data structure keeping track of gpu_vmas. It can also protect the
+ gpu_vm's list of userptr gpu_vmas. With a CPU mm analogy this would
+ correspond to the mmap_lock. An rwsem allows several readers to walk
+ the VM tree concurrently, but the benefit of that concurrency most
+ likely varies from driver to driver.
+* The ``userptr_seqlock``. This lock is taken in read mode for each
+ userptr gpu_vma on the gpu_vm's userptr list, and in write mode during mmu
+ notifier invalidation. This is not a real seqlock but is described in
+ ``mm/mmu_notifier.c`` as a "Collision-retry read-side/write-side
+ 'lock' a lot like a seqcount. However this allows multiple
+ write-sides to hold it at once...". The read side critical section
+ is enclosed by ``mmu_interval_read_begin() /
+ mmu_interval_read_retry()`` with ``mmu_interval_read_begin()``
+ sleeping if the write side is held.
+ The write side is held by the core mm while calling mmu interval
+ invalidation notifiers.
+* The ``gpu_vm->resv`` lock. Protects the gpu_vm's list of gpu_vmas needing
+ rebinding, as well as the residency state of all the gpu_vm's local
+ GEM objects.
+ Furthermore, it typically protects the gpu_vm's list of evicted and
+ external GEM objects.
+* The ``gpu_vm->userptr_notifier_lock``. This is an rwsem that is
+ taken in read mode during exec and write mode during a mmu notifier
+ invalidation. The userptr notifier lock is per gpu_vm.
+* The ``gem_object->gpuva_lock``. This lock protects the GEM object's
+ list of gpu_vm_bos. This is usually the same lock as the GEM
+ object's dma_resv, but some drivers protect this list differently;
+ see below.
+* The ``gpu_vm list spinlocks``. Some implementations need these to
+ be able to update the gpu_vm evicted- and external object
+ lists. For those implementations, the spinlocks are grabbed when the
+ lists are manipulated. However, to avoid locking order violations
+ with the dma_resv locks, a special scheme is needed when iterating
+ over the lists.
+
+.. _gpu_vma lifetime:
+
+Protection and lifetime of gpu_vm_bos and gpu_vmas
+==================================================
+
+The GEM object's list of gpu_vm_bos and the gpu_vm_bo's list of gpu_vmas
+are protected by the ``gem_object->gpuva_lock``, which is typically the
+same as the GEM object's dma_resv, but if the driver
+needs to access these lists from within a dma_fence signalling
+critical section, it can instead choose to protect it with a
+separate lock, which can be locked from within the dma_fence signalling
+critical section. Such drivers then need to pay additional attention
+to what locks need to be taken from within the loop when iterating
+over the gpu_vm_bo and gpu_vma lists to avoid locking-order violations.
+
+The DRM GPUVM set of helpers provides lockdep asserts that this lock is
+held in relevant situations and also provides a means of making itself
+aware of which lock is actually used: :c:func:`drm_gem_gpuva_set_lock`.
+
+Each gpu_vm_bo holds a reference counted pointer to the underlying GEM
+object, and each gpu_vma holds a reference counted pointer to the
+gpu_vm_bo. When iterating over the GEM object's list of gpu_vm_bos and
+over the gpu_vm_bo's list of gpu_vmas, the ``gem_object->gpuva_lock`` must
+not be dropped, otherwise, gpu_vmas attached to a gpu_vm_bo may
+disappear without notice since those are not reference-counted. A
+driver may implement its own scheme to allow this at the expense of
+additional complexity, but this is outside the scope of this document.
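+
+As an illustration of this rule, using the simplified pseudo-code
+helpers of this document, a safe walk keeps the lock held across both
+loops. This is only a sketch; ``gem_gpuva_lock()`` and
+``inspect_gpu_vma()`` are hypothetical stand-ins for whatever lock and
+per-vma operation the driver uses:
+
+.. code-block:: C
+
+   // The gpuva_lock (often the GEM object's dma_resv) must be held
+   // for the duration of the whole walk, since the gpu_vmas are not
+   // reference-counted and may otherwise disappear.
+   gem_gpuva_lock(obj);
+   for_each_gpu_vm_bo_of_obj(obj, &gpu_vm_bo)
+           for_each_gpu_vma_of_gpu_vm_bo(&gpu_vm_bo, &gpu_vma)
+                   inspect_gpu_vma(&gpu_vma);
+   gem_gpuva_unlock(obj);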
+
+In the DRM GPUVM implementation, each gpu_vm_bo and each gpu_vma
+holds a reference on the gpu_vm itself. Due to this, and to avoid circular
+reference counting, cleanup of the gpu_vm's gpu_vmas must not be done from the
+gpu_vm's destructor. Drivers typically implement a gpu_vm close
+function for this cleanup. The gpu_vm close function will abort gpu
+execution using this VM, unmap all gpu_vmas and release page-table memory.
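+
+In simplified pseudo-code, and with all helper names hypothetical, such
+a close function might look like the following sketch:
+
+.. code-block:: C
+
+   void gpu_vm_close(struct gpu_vm *gpu_vm)
+   {
+           // Stop new jobs from being submitted against this VM and
+           // wait for already submitted jobs to complete.
+           abort_gpu_execution(gpu_vm);
+           wait_gpu_vm_idle(gpu_vm);
+
+           down_write(&gpu_vm->lock);
+           for_each_gpu_vma_of_gpu_vm(gpu_vm, &gpu_vma)
+                   unmap_and_unlink_gpu_vma(&gpu_vma);
+           release_page_table_memory(gpu_vm);
+           up_write(&gpu_vm->lock);
+   }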
+
+Revalidation and eviction of local objects
+==========================================
+
+Note that in all the code examples given below we use simplified
+pseudo-code. In particular, the dma_resv deadlock avoidance algorithm
+as well as reserving memory for dma_resv fences is left out.
+
+Revalidation
+____________
+With VM_BIND, all local objects need to be resident when the gpu is
+executing using the gpu_vm, and the objects need to have valid
+gpu_vmas set up pointing to them. Typically, each gpu command buffer
+submission is therefore preceded with a re-validation section:
+
+.. code-block:: C
+
+ dma_resv_lock(gpu_vm->resv);
+
+ // Validation section starts here.
+ for_each_gpu_vm_bo_on_evict_list(&gpu_vm->evict_list, &gpu_vm_bo) {
+ validate_gem_bo(&gpu_vm_bo->gem_bo);
+
+ // The following list iteration needs the GEM object's
+ // dma_resv to be held (it protects the gpu_vm_bo's list of
+ // gpu_vmas, but since local GEM objects share the gpu_vm's
+ // dma_resv, it is already held at this point).
+ for_each_gpu_vma_of_gpu_vm_bo(&gpu_vm_bo, &gpu_vma)
+ move_gpu_vma_to_rebind_list(&gpu_vma, &gpu_vm->rebind_list);
+ }
+
+ for_each_gpu_vma_on_rebind_list(&gpu_vm->rebind_list, &gpu_vma) {
+ rebind_gpu_vma(&gpu_vma);
+ remove_gpu_vma_from_rebind_list(&gpu_vma);
+ }
+ // Validation section ends here, and job submission starts.
+
+ add_dependencies(&gpu_job, &gpu_vm->resv);
+ job_dma_fence = gpu_submit(&gpu_job);
+
+ add_dma_fence(job_dma_fence, &gpu_vm->resv);
+ dma_resv_unlock(gpu_vm->resv);
+
+The reason for having a separate gpu_vm rebind list is that there
+might be userptr gpu_vmas that are not mapping a buffer object but
+that also need rebinding.
+
+Eviction
+________
+
+Eviction of one of these local objects will then look similar to the
+following:
+
+.. code-block:: C
+
+ obj = get_object_from_lru();
+
+ dma_resv_lock(obj->resv);
+ for_each_gpu_vm_bo_of_obj(obj, &gpu_vm_bo)
+ add_gpu_vm_bo_to_evict_list(&gpu_vm_bo, &gpu_vm->evict_list);
+
+ add_dependencies(&eviction_job, &obj->resv);
+ job_dma_fence = gpu_submit(&eviction_job);
+ add_dma_fence(job_dma_fence, &obj->resv);
+
+ dma_resv_unlock(obj->resv);
+ put_object(obj);
+
+Note that since the object is local to the gpu_vm, it will share the gpu_vm's
+dma_resv lock such that ``obj->resv == gpu_vm->resv``.
+The gpu_vm_bos marked for eviction are put on the gpu_vm's evict list,
+which is protected by ``gpu_vm->resv``. During eviction all local
+objects have their dma_resv locked and, due to the above equality, also
+the gpu_vm's dma_resv protecting the gpu_vm's evict list is locked.
+
+With VM_BIND, gpu_vmas don't need to be unbound before eviction,
+since the driver must ensure that the eviction blit or copy will wait
+for GPU idle or depend on all previous GPU activity. Furthermore, any
+subsequent attempt by the GPU to access freed memory through the
+gpu_vma will be preceded by a new exec function, with a revalidation
+section which will make sure all gpu_vmas are rebound. The eviction
+code holding the object's dma_resv while revalidating will ensure a
+new exec function may not race with the eviction.
+
+A driver can be implemented in such a way that, on each exec function,
+only a subset of vmas are selected for rebind. In this case, all vmas that are
+*not* selected for rebind must be unbound before the exec
+function workload is submitted.
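+
+As a hedged sketch using this document's pseudo-code helpers, such a
+subset-rebind step could look like the following, where
+``driver_selects_for_rebind()`` and ``unbind_gpu_vma()`` are
+hypothetical:
+
+.. code-block:: C
+
+   // Performed within the validation section, before job submission.
+   for_each_gpu_vma_on_rebind_list(&gpu_vm->rebind_list, &gpu_vma) {
+           if (driver_selects_for_rebind(&gpu_vma))
+                   rebind_gpu_vma(&gpu_vma);
+           else
+                   unbind_gpu_vma(&gpu_vma);
+           remove_gpu_vma_from_rebind_list(&gpu_vma);
+   }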
+
+Locking with external buffer objects
+====================================
+
+Since external buffer objects may be shared by multiple gpu_vms, they
+can't share their reservation object with a single gpu_vm. Instead
+they need to have a reservation object of their own. The external
+objects bound to a gpu_vm using one or many gpu_vmas are therefore put on a
+per-gpu_vm list which is protected by the gpu_vm's dma_resv lock or
+one of the :ref:`gpu_vm list spinlocks <Spinlock iteration>`. Once
+the gpu_vm's reservation object is locked, it is safe to traverse the
+external object list and lock the dma_resvs of all external
+objects. However, if instead a list spinlock is used, a more elaborate
+iteration scheme needs to be used.
+
+At eviction time, the gpu_vm_bos of *all* the gpu_vms an external
+object is bound to need to be put on their gpu_vm's evict list.
+However, when evicting an external object, the dma_resvs of the
+gpu_vms the object is bound to are typically not held. Only
+the object's private dma_resv can be guaranteed to be held. If there
+is a ww_acquire context at hand at eviction time we could grab those
+dma_resvs but that could cause expensive ww_mutex rollbacks. A simple
+option is to just mark the gpu_vm_bos of the evicted gem object with
+an ``evicted`` bool that is inspected before the next time the
+corresponding gpu_vm evicted list needs to be traversed, for example when
+traversing the list of external objects and locking them. At that time,
+both the gpu_vm's dma_resv and the object's dma_resv are held, and the
+gpu_vm_bo marked evicted can then be added to the gpu_vm's list of
+evicted gpu_vm_bos. The ``evicted`` bool is formally protected by the
+object's dma_resv.
+
+The exec function becomes
+
+.. code-block:: C
+
+ dma_resv_lock(gpu_vm->resv);
+
+ // External object list is protected by the gpu_vm->resv lock.
+ for_each_gpu_vm_bo_on_extobj_list(gpu_vm, &gpu_vm_bo) {
+ dma_resv_lock(gpu_vm_bo.gem_obj->resv);
+ if (gpu_vm_bo_marked_evicted(&gpu_vm_bo))
+ add_gpu_vm_bo_to_evict_list(&gpu_vm_bo, &gpu_vm->evict_list);
+ }
+
+ for_each_gpu_vm_bo_on_evict_list(&gpu_vm->evict_list, &gpu_vm_bo) {
+ validate_gem_bo(&gpu_vm_bo->gem_bo);
+
+ for_each_gpu_vma_of_gpu_vm_bo(&gpu_vm_bo, &gpu_vma)
+ move_gpu_vma_to_rebind_list(&gpu_vma, &gpu_vm->rebind_list);
+ }
+
+ for_each_gpu_vma_on_rebind_list(&gpu_vm->rebind_list, &gpu_vma) {
+ rebind_gpu_vma(&gpu_vma);
+ remove_gpu_vma_from_rebind_list(&gpu_vma);
+ }
+
+ add_dependencies(&gpu_job, &gpu_vm->resv);
+ job_dma_fence = gpu_submit(&gpu_job);
+
+ add_dma_fence(job_dma_fence, &gpu_vm->resv);
+ for_each_external_obj(gpu_vm, &obj)
+ add_dma_fence(job_dma_fence, &obj->resv);
+ dma_resv_unlock_all_resv_locks();
+
+And the corresponding shared-object aware eviction would look like:
+
+.. code-block:: C
+
+ obj = get_object_from_lru();
+
+ dma_resv_lock(obj->resv);
+ for_each_gpu_vm_bo_of_obj(obj, &gpu_vm_bo)
+ if (object_is_vm_local(obj))
+ add_gpu_vm_bo_to_evict_list(&gpu_vm_bo, &gpu_vm->evict_list);
+ else
+ mark_gpu_vm_bo_evicted(&gpu_vm_bo);
+
+ add_dependencies(&eviction_job, &obj->resv);
+ job_dma_fence = gpu_submit(&eviction_job);
+ add_dma_fence(job_dma_fence, &obj->resv);
+
+ dma_resv_unlock(obj->resv);
+ put_object(obj);
+
+.. _Spinlock iteration:
+
+Accessing the gpu_vm's lists without the dma_resv lock held
+===========================================================
+
+Some drivers will hold the gpu_vm's dma_resv lock when accessing the
+gpu_vm's evict list and external objects lists. However, there are
+drivers that need to access these lists without the dma_resv lock
+held, for example due to asynchronous state updates from within the
+dma_fence signalling critical path. In such cases, a spinlock can be
+used to protect manipulation of the lists. However, since higher level
+sleeping locks need to be taken for each list item while iterating
+over the lists, the items already iterated over need to be
+temporarily moved to a private list and the spinlock released
+while processing each item:
+
+.. code-block:: C
+
+ struct list_head still_in_list;
+
+ INIT_LIST_HEAD(&still_in_list);
+
+ spin_lock(&gpu_vm->list_lock);
+ do {
+ struct list_head *entry = list_first_entry_or_null(&gpu_vm->list, head);
+
+ if (!entry)
+ break;
+
+ list_move_tail(&entry->head, &still_in_list);
+ list_entry_get_unless_zero(entry);
+ spin_unlock(&gpu_vm->list_lock);
+
+ process(entry);
+
+ spin_lock(&gpu_vm->list_lock);
+ list_entry_put(entry);
+ } while (true);
+
+ list_splice_tail(&still_in_list, &gpu_vm->list);
+ spin_unlock(&gpu_vm->list_lock);
+
+Due to the additional locking and atomic operations, drivers that *can*
+avoid accessing the gpu_vm's list outside of the dma_resv lock
+might want to avoid this iteration scheme as well, particularly if the
+driver anticipates a large number of list items. For lists where the
+anticipated number of list items is small, where list iteration doesn't
+happen very often or where there is a significant additional cost
+associated with each iteration, the atomic operation overhead
+associated with this type of iteration is most likely negligible. Note that
+if this scheme is used, it is necessary to make sure this list
+iteration is protected by an outer level lock or semaphore, since list
+items are temporarily pulled off the list while iterating. It is
+also worth mentioning that the local list ``still_in_list`` should
+be considered protected by the ``gpu_vm->list_lock``, so items may
+be removed from it concurrently with the list iteration.
+
+Please refer to the :ref:`DRM GPUVM locking section
+<drm_gpuvm_locking>` and its internal
+:c:func:`get_next_vm_bo_from_list` function.
+
+
+userptr gpu_vmas
+================
+
+A userptr gpu_vma is a gpu_vma that, instead of mapping a buffer object to a
+GPU virtual address range, directly maps a CPU mm range of anonymous-
+or file page-cache pages.
+A very simple approach would be to just pin the pages using
+pin_user_pages() at bind time and unpin them at unbind time, but this
+creates a Denial-Of-Service vector since a single user-space process
+would be able to pin down all of system memory, which is not
+desirable. (For special use-cases, and assuming proper accounting, pinning might
+still be a desirable feature, though.) What we need to do in the
+general case is to obtain a reference to the desired pages, make sure
+we are notified using an MMU notifier just before the CPU mm unmaps the
+pages, dirty them if they are not mapped read-only to the GPU, and
+then drop the reference.
+When we are notified by the MMU notifier that the CPU mm is about to drop the
+pages, we need to stop GPU access to the pages by waiting for VM idle
+in the MMU notifier and make sure that before the next time the GPU
+tries to access whatever is now present in the CPU mm range, we unmap
+the old pages from the GPU page tables and repeat the process of
+obtaining new page references. (See the :ref:`notifier example
+<Invalidation example>` below). Note that when the core mm decides to
+launder pages, we get such an unmap MMU notification and can mark the
+pages dirty again before the next GPU access. We also get similar MMU
+notifications for NUMA accounting which the GPU driver doesn't really
+need to care about, but so far it has proven difficult to exclude
+certain notifications.
+
+Using an MMU notifier for device DMA (and other methods) is described in
+:ref:`the pin_user_pages() documentation <mmu-notifier-registration-case>`.
+
+Now, the method of obtaining struct page references using
+get_user_pages() unfortunately can't be used under a dma_resv lock
+since that would violate the locking order of the dma_resv lock vs the
+mmap_lock that is grabbed when resolving a CPU pagefault. This means
+the gpu_vm's list of userptr gpu_vmas needs to be protected by an
+outer lock, which in our example below is the ``gpu_vm->lock``.
+
+The MMU interval seqlock for a userptr gpu_vma is used in the following
+way:
+
+.. code-block:: C
+
+ // Exclusive locking mode here is strictly needed only if there are
+ // invalidated userptr gpu_vmas present, to avoid concurrent userptr
+ // revalidations of the same userptr gpu_vma.
+ down_write(&gpu_vm->lock);
+ retry:
+
+ // Note: mmu_interval_read_begin() blocks until there is no
+ // invalidation notifier running anymore.
+ seq = mmu_interval_read_begin(&gpu_vma->userptr_interval);
+ if (seq != gpu_vma->saved_seq) {
+ obtain_new_page_pointers(&gpu_vma);
+ dma_resv_lock(&gpu_vm->resv);
+ add_gpu_vma_to_revalidate_list(&gpu_vma, &gpu_vm);
+ dma_resv_unlock(&gpu_vm->resv);
+ gpu_vma->saved_seq = seq;
+ }
+
+ // The usual revalidation goes here.
+
+ // Final userptr sequence validation may not happen before the
+ // submission dma_fence is added to the gpu_vm's resv, from the PoV
+ // of the MMU invalidation notifier. Hence the
+ // userptr_notifier_lock that will make them appear atomic.
+
+ add_dependencies(&gpu_job, &gpu_vm->resv);
+ down_read(&gpu_vm->userptr_notifier_lock);
+ if (mmu_interval_read_retry(&gpu_vma->userptr_interval, gpu_vma->saved_seq)) {
+ up_read(&gpu_vm->userptr_notifier_lock);
+ goto retry;
+ }
+
+ job_dma_fence = gpu_submit(&gpu_job));
+
+ add_dma_fence(job_dma_fence, &gpu_vm->resv);
+
+ for_each_external_obj(gpu_vm, &obj)
+ add_dma_fence(job_dma_fence, &obj->resv);
+
+ dma_resv_unlock_all_resv_locks();
+ up_read(&gpu_vm->userptr_notifier_lock);
+ up_write(&gpu_vm->lock);
+
+The code between ``mmu_interval_read_begin()`` and the
+``mmu_interval_read_retry()`` marks the read side critical section of
+what we call the ``userptr_seqlock``. In reality, the gpu_vm's userptr
+gpu_vma list is looped through, and the check is done for *all* of its
+userptr gpu_vmas, although we only show a single one here.
+
+The userptr gpu_vma MMU invalidation notifier might be called from
+reclaim context and, again, to avoid locking order violations, we can't
+take any dma_resv lock nor the gpu_vm->lock from within it.
+
+.. _Invalidation example:
+.. code-block:: C
+
+ bool gpu_vma_userptr_invalidate(userptr_interval, cur_seq)
+ {
+ // Make sure the exec function either sees the new sequence
+ // and backs off or we wait for the dma-fence:
+
+ down_write(&gpu_vm->userptr_notifier_lock);
+ mmu_interval_set_seq(userptr_interval, cur_seq);
+ up_write(&gpu_vm->userptr_notifier_lock);
+
+ // At this point, the exec function can't succeed in
+ // submitting a new job, because cur_seq is an invalid
+ // sequence number and will always cause a retry. When all
+ // invalidation callbacks have completed, the mmu notifier core will
+ // flip the sequence number to a valid one. However, we need to
+ // stop gpu access to the old pages here.
+
+ dma_resv_wait_timeout(&gpu_vm->resv, DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+ return true;
+ }
+
+When this invalidation notifier returns, the GPU can no longer be
+accessing the old pages of the userptr gpu_vma and needs to redo the
+page-binding before a new GPU submission can succeed.
+
+Efficient userptr gpu_vma exec_function iteration
+_________________________________________________
+
+If the gpu_vm's list of userptr gpu_vmas becomes large, it's
+inefficient to iterate through the complete list of userptrs on each
+exec function to check whether each userptr gpu_vma's saved
+sequence number is stale. A solution to this is to put all
+*invalidated* userptr gpu_vmas on a separate gpu_vm list and
+only check the gpu_vmas present on this list on each exec
+function. This list will then lend itself very well to the spinlock
+locking scheme that is
+:ref:`described in the spinlock iteration section <Spinlock iteration>`, since
+in the mmu notifier, where we add the invalidated gpu_vmas to the
+list, it's not possible to take any outer locks like the
+``gpu_vm->lock`` or the ``gpu_vm->resv`` lock. Note that the
+``gpu_vm->lock`` still needs to be taken while iterating to ensure the list is
+complete, as also mentioned in that section.
+
+If using an invalidated userptr list like this, the retry check in the
+exec function trivially becomes a check for the invalidated list being empty.
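+
+A minimal sketch of that check, assuming the invalidated userptr list
+is named ``gpu_vm->invalidated_userptr_list``, could look like:
+
+.. code-block:: C
+
+   // Replaces looping over all userptr gpu_vmas with
+   // mmu_interval_read_retry(). The userptr_notifier_lock makes the
+   // check appear atomic with the subsequent job submission.
+   down_read(&gpu_vm->userptr_notifier_lock);
+   if (!list_empty(&gpu_vm->invalidated_userptr_list)) {
+           up_read(&gpu_vm->userptr_notifier_lock);
+           goto retry;
+   }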
+
+Locking at bind and unbind time
+===============================
+
+At bind time, assuming a GEM object backed gpu_vma, each
+gpu_vma needs to be associated with a gpu_vm_bo and that
+gpu_vm_bo in turn needs to be added to the GEM object's
+gpu_vm_bo list, and possibly to the gpu_vm's external object
+list. This is referred to as *linking* the gpu_vma, and typically
+requires that the ``gpu_vm->lock`` and the ``gem_object->gpuva_lock``
+are held. When unlinking a gpu_vma the same locks should be held,
+which ensures that when iterating over ``gpu_vmas``, either under
+the ``gpu_vm->resv`` or the GEM object's dma_resv, the gpu_vmas
+stay alive as long as the lock under which we iterate is not released. For
+userptr gpu_vmas it's similarly required that during vma destroy, the
+outer ``gpu_vm->lock`` is held, since otherwise when iterating over
+the invalidated userptr list as described in the previous section,
+there is nothing keeping those userptr gpu_vmas alive.
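+
+In simplified pseudo-code, bind-time linking under the locks mentioned
+above might look like the following sketch, where
+``lookup_or_create_gpu_vm_bo()`` and the other helpers are
+hypothetical:
+
+.. code-block:: C
+
+   down_write(&gpu_vm->lock);
+   gpu_vma = create_gpu_vma(gpu_vm, obj, range);
+
+   // Typically dma_resv_lock(obj->resv), unless the driver protects
+   // the gpuva list with a separate lock.
+   gem_gpuva_lock(obj);
+   gpu_vm_bo = lookup_or_create_gpu_vm_bo(gpu_vm, obj);
+   link_gpu_vma(&gpu_vma, &gpu_vm_bo);
+   if (is_external_object(obj))
+           add_gpu_vm_bo_to_extobj_list(&gpu_vm_bo, gpu_vm);
+   gem_gpuva_unlock(obj);
+   up_write(&gpu_vm->lock);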
+
+Locking for recoverable page-fault page-table updates
+=====================================================
+
+There are two important things we need to ensure with locking for
+recoverable page-faults:
+
+* At the time we return pages back to the system / allocator for
+ reuse, there should be no remaining GPU mappings and any GPU TLB
+ must have been flushed.
+* The unmapping and mapping of a gpu_vma must not race.
+
+Since the unmapping (or zapping) of GPU ptes typically takes place
+where it is hard or even impossible to take any outer level locks, we
+must either introduce a new lock that is held at both mapping and
+unmapping time, or look at the locks we do hold at unmapping time and
+make sure that they are held also at mapping time. For userptr
+gpu_vmas, the ``userptr_seqlock`` is held in write mode in the mmu
+invalidation notifier where zapping happens. Hence, if the
+``userptr_seqlock`` as well as the ``gpu_vm->userptr_notifier_lock``
+is held in read mode during mapping, it will not race with the
+zapping. For GEM object backed gpu_vmas, zapping will take place under
+the GEM object's dma_resv, and ensuring that the dma_resv is held also
+when populating the page-tables for any gpu_vma pointing to the GEM
+object will similarly ensure we are race-free.
+
+If any part of the mapping is performed asynchronously
+under a dma-fence with these locks released, the zapping will need to
+wait for that dma-fence to signal under the relevant lock before
+starting to modify the page-table.
+
+Since modifying the
+page-table structure in a way that frees up page-table memory
+might also require outer level locks, the zapping of GPU ptes
+typically focuses only on zeroing page-table or page-directory entries
+and flushing TLB, whereas freeing of page-table memory is deferred to
+unbind or rebind time.
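+
+As a final hedged sketch, a pagefault-time mapping path for a userptr
+gpu_vma that follows the above rules could look like the following,
+with ``map_gpu_vma_page_tables()`` a hypothetical helper:
+
+.. code-block:: C
+
+   retry:
+   seq = mmu_interval_read_begin(&gpu_vma->userptr_interval);
+   obtain_new_page_pointers(&gpu_vma);
+
+   // Holding the userptr_notifier_lock in read mode and re-checking
+   // the sequence number ensures we can't race with the zapping that
+   // runs under the write side of the userptr_seqlock.
+   down_read(&gpu_vm->userptr_notifier_lock);
+   if (mmu_interval_read_retry(&gpu_vma->userptr_interval, seq)) {
+           up_read(&gpu_vm->userptr_notifier_lock);
+           goto retry;
+   }
+   map_gpu_vma_page_tables(&gpu_vma);
+   up_read(&gpu_vm->userptr_notifier_lock);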
diff --git a/Documentation/gpu/imagination/index.rst b/Documentation/gpu/imagination/index.rst
index dc9579e758c3..0c1e247cea41 100644
--- a/Documentation/gpu/imagination/index.rst
+++ b/Documentation/gpu/imagination/index.rst
@@ -3,7 +3,7 @@ drm/imagination PowerVR Graphics Driver
=======================================
.. kernel-doc:: drivers/gpu/drm/imagination/pvr_drv.c
- :doc: PowerVR Graphics Driver
+ :doc: PowerVR (Series 6 and later) and IMG Graphics Driver
Contents
========
diff --git a/Documentation/gpu/imagination/uapi.rst b/Documentation/gpu/imagination/uapi.rst
index 2227ea7e6222..7502413d0a93 100644
--- a/Documentation/gpu/imagination/uapi.rst
+++ b/Documentation/gpu/imagination/uapi.rst
@@ -46,9 +46,6 @@ DEV_QUERY
drm_pvr_dev_query_heap_info
.. kernel-doc:: include/uapi/drm/pvr_drm.h
- :doc: Flags for DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
-
-.. kernel-doc:: include/uapi/drm/pvr_drm.h
:identifiers: drm_pvr_static_data_area_usage
drm_pvr_static_data_area
drm_pvr_dev_query_static_data_areas
@@ -121,7 +118,7 @@ CREATE_FREE_LIST and DESTROY_FREE_LIST
:identifiers: drm_pvr_ioctl_destroy_free_list_args
CREATE_HWRT_DATASET and DESTROY_HWRT_DATASET
---------------------------------------
+--------------------------------------------
.. kernel-doc:: include/uapi/drm/pvr_drm.h
:doc: PowerVR IOCTL CREATE_HWRT_DATASET and DESTROY_HWRT_DATASET interfaces
diff --git a/Documentation/gpu/implementation_guidelines.rst b/Documentation/gpu/implementation_guidelines.rst
index 138e637dcc6b..dbccfa72f1c9 100644
--- a/Documentation/gpu/implementation_guidelines.rst
+++ b/Documentation/gpu/implementation_guidelines.rst
@@ -7,3 +7,4 @@ Misc DRM driver uAPI- and feature implementation guidelines
.. toctree::
drm-vm-bind-async
+ drm-vm-bind-locking
diff --git a/Documentation/gpu/rfc/xe.rst b/Documentation/gpu/rfc/xe.rst
index c29113a0ac30..ceb21219d52e 100644
--- a/Documentation/gpu/rfc/xe.rst
+++ b/Documentation/gpu/rfc/xe.rst
@@ -123,10 +123,15 @@ Documentation should include:
* O(1) complexity under VM_BIND.
+The document is now included in the drm documentation :doc:`here </gpu/drm-vm-bind-async>`.
+
Some parts of userptr like mmu_notifiers should become GPUVA or DRM helpers when
the second driver supporting VM_BIND+userptr appears. Details to be defined when
the time comes.
+The DRM GPUVM helpers do not yet include the userptr parts, but discussions
+about implementing them are ongoing.
+
Long running compute: minimal data structure/scaffolding
--------------------------------------------------------
The generic scheduler code needs to include the handling of endless compute
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index b62c7fa0c2bc..41a264bf84ce 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -337,8 +337,8 @@ connector register/unregister fixes
Level: Intermediate
-Remove load/unload callbacks from all non-DRIVER_LEGACY drivers
----------------------------------------------------------------
+Remove load/unload callbacks
+----------------------------
The load/unload callbacks in struct &drm_driver are very much midlayers, plus
for historical reasons they get the ordering wrong (and we can't fix that)
@@ -347,8 +347,7 @@ between setting up the &drm_driver structure and calling drm_dev_register().
- Rework drivers to no longer use the load/unload callbacks, directly coding the
load/unload sequence into the driver's probe function.
-- Once all non-DRIVER_LEGACY drivers are converted, disallow the load/unload
- callbacks for all modern drivers.
+- Once all drivers are converted, remove the load/unload callbacks.
Contact: Daniel Vetter
@@ -782,6 +781,29 @@ Contact: Hans de Goede
Level: Advanced
+Buffer age or other damage accumulation algorithm for buffer damage
+===================================================================
+
+Drivers that do per-buffer uploads need buffer damage handling (rather than
+frame damage, as with drivers that do per-plane or per-CRTC uploads), but there
+is no support for the buffer age or any other damage accumulation algorithm.
+
+For this reason, the damage helpers just fall back to a full plane update if the
+framebuffer attached to a plane has changed since the last page-flip. Drivers
+set &drm_plane_state.ignore_damage_clips to true as an indication to the
+drm_atomic_helper_damage_iter_init() and drm_atomic_helper_damage_iter_next()
+helpers that the damage clips should be ignored.
+
+This should be improved to get damage tracking properly working on drivers that
+do per-buffer uploads.
+
+More information about damage tracking and references to learning materials can
+be found in :ref:`damage_tracking_properties`.
+
+Contact: Javier Martinez Canillas <[email protected]>
+
+Level: Advanced
+
Outside DRM
===========
diff --git a/Documentation/networking/tcp_ao.rst b/Documentation/networking/tcp_ao.rst
index cfa5bf1cc542..8a58321acce7 100644
--- a/Documentation/networking/tcp_ao.rst
+++ b/Documentation/networking/tcp_ao.rst
@@ -99,7 +99,7 @@ also [6.1]::
when it is no longer considered permitted.
Linux TCP-AO will try its best to prevent you from removing a key that's
-being used, considering it a key management failure. But sine keeping
+being used, considering it a key management failure. But since keeping
an outdated key may become a security issue and as a peer may
unintentionally prevent the removal of an old key by always setting
it as RNextKeyID - a forced key removal mechanism is provided, where
diff --git a/Documentation/trace/coresight/coresight.rst b/Documentation/trace/coresight/coresight.rst
index 4a71ea6cb390..826e59a698da 100644
--- a/Documentation/trace/coresight/coresight.rst
+++ b/Documentation/trace/coresight/coresight.rst
@@ -130,7 +130,7 @@ Misc:
Device Tree Bindings
--------------------
-See Documentation/devicetree/bindings/arm/arm,coresight-\*.yaml for details.
+See ``Documentation/devicetree/bindings/arm/arm,coresight-*.yaml`` for details.
As of this writing drivers for ITM, STMs and CTIs are not provided but are
expected to be added as the solution matures.
diff --git a/MAINTAINERS b/MAINTAINERS
index 0ba904b46efe..c1a2cac80f50 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -171,13 +171,10 @@ S: Supported
F: drivers/soc/fujitsu/a64fx-diag.c
A8293 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/dvb-frontends/a8293*
AACRAID SCSI RAID DRIVER
@@ -576,23 +573,17 @@ F: drivers/iio/accel/adxl372_i2c.c
F: drivers/iio/accel/adxl372_spi.c
AF9013 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/dvb-frontends/af9013*
AF9033 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/dvb-frontends/af9033*
AFFS FILE SYSTEM
@@ -650,13 +641,10 @@ F: fs/aio.c
F: include/linux/*aio*.h
AIRSPY MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/usb/airspy/
ALACRITECH GIGABIT ETHERNET DRIVER
@@ -2155,6 +2143,7 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
F: arch/arm/boot/dts/nxp/imx/
F: arch/arm/boot/dts/nxp/mxs/
+F: arch/arm64/boot/dts/freescale/
X: arch/arm64/boot/dts/freescale/fsl-*
X: arch/arm64/boot/dts/freescale/qoriq-*
X: drivers/media/i2c/
@@ -2535,7 +2524,7 @@ F: drivers/*/*/*wpcm*
F: drivers/*/*wpcm*
ARM/NXP S32G ARCHITECTURE
-M: Chester Lin <[email protected]>
+M: Chester Lin <[email protected]>
R: Andreas Färber <[email protected]>
R: Matthias Brugger <[email protected]>
R: NXP S32 Linux Team <[email protected]>
@@ -5076,7 +5065,6 @@ CLANG CONTROL FLOW INTEGRITY SUPPORT
M: Sami Tolvanen <[email protected]>
M: Kees Cook <[email protected]>
R: Nathan Chancellor <[email protected]>
-R: Nick Desaulniers <[email protected]>
S: Supported
B: https://github.com/ClangBuiltLinux/linux/issues
@@ -5091,8 +5079,9 @@ F: .clang-format
CLANG/LLVM BUILD SUPPORT
M: Nathan Chancellor <[email protected]>
-M: Nick Desaulniers <[email protected]>
-R: Tom Rix <[email protected]>
+R: Nick Desaulniers <[email protected]>
+R: Bill Wendling <[email protected]>
+R: Justin Stitt <[email protected]>
S: Supported
W: https://clangbuiltlinux.github.io/
@@ -5242,7 +5231,6 @@ F: drivers/platform/x86/compal-laptop.c
COMPILER ATTRIBUTES
M: Miguel Ojeda <[email protected]>
-R: Nick Desaulniers <[email protected]>
S: Maintained
F: include/linux/compiler_attributes.h
@@ -5605,13 +5593,10 @@ F: Documentation/driver-api/media/drivers/cx88*
F: drivers/media/pci/cx88/
CXD2820R MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/dvb-frontends/cxd2820r*
CXGB3 ETHERNET DRIVER (CXGB3)
@@ -5724,13 +5709,10 @@ F: Documentation/devicetree/bindings/input/cypress-sf.yaml
F: drivers/input/keyboard/cypress-sf.c
CYPRESS_FIRMWARE MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/common/cypress_firmware*
CYTTSP TOUCHSCREEN DRIVER
@@ -7317,53 +7299,38 @@ T: git git://linuxtv.org/media_tree.git
F: drivers/media/pci/dt3155/
DVB_USB_AF9015 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/usb/dvb-usb-v2/af9015*
DVB_USB_AF9035 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/usb/dvb-usb-v2/af9035*
DVB_USB_ANYSEE MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/usb/dvb-usb-v2/anysee*
DVB_USB_AU6610 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/usb/dvb-usb-v2/au6610*
DVB_USB_CE6230 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/usb/dvb-usb-v2/ce6230*
DVB_USB_CXUSB MEDIA DRIVER
@@ -7377,22 +7344,17 @@ T: git git://linuxtv.org/media_tree.git
F: drivers/media/usb/dvb-usb/cxusb*
DVB_USB_EC168 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/usb/dvb-usb-v2/ec168*
DVB_USB_GL861 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/usb/dvb-usb-v2/gl861*
DVB_USB_MXL111SF MEDIA DRIVER
@@ -7406,23 +7368,18 @@ T: git git://linuxtv.org/mkrufky/mxl111sf.git
F: drivers/media/usb/dvb-usb-v2/mxl111sf*
DVB_USB_RTL28XXU MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/usb/dvb-usb-v2/rtl28xxu*
DVB_USB_V2 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/usb/dvb-usb-v2/dvb_usb*
F: drivers/media/usb/dvb-usb-v2/usb_urb.c
@@ -7464,13 +7421,10 @@ F: Documentation/devicetree/bindings/input/e3x0-button.txt
F: drivers/input/misc/e3x0-button.c
E4000 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/tuners/e4000*
EARTH_PT1 MEDIA DRIVER
@@ -7486,13 +7440,10 @@ S: Odd Fixes
F: drivers/media/pci/pt3/
EC100 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/dvb-frontends/ec100*
ECRYPT FILE SYSTEM
@@ -8110,13 +8061,10 @@ F: drivers/media/tuners/fc0011.c
F: drivers/media/tuners/fc0011.h
FC2580 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/tuners/fc2580*
FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
@@ -9246,13 +9194,10 @@ F: include/trace/events/habanalabs.h
F: include/uapi/drm/habanalabs_accel.h
HACKRF MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/usb/hackrf/
HANDSHAKE UPCALL FOR TRANSPORT LAYER SECURITY
@@ -9625,6 +9570,7 @@ F: drivers/crypto/hisilicon/sgl.c
F: include/linux/hisi_acc_qm.h
HISILICON ROCE DRIVER
+M: Chengchang Tang <[email protected]>
M: Junxian Huang <[email protected]>
S: Maintained
@@ -10397,6 +10343,7 @@ M: Frank Binns <[email protected]>
M: Donald Robson <[email protected]>
M: Matt Coster <[email protected]>
S: Supported
+T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/gpu/img,powervr.yaml
F: Documentation/gpu/imagination/
F: drivers/gpu/drm/imagination/
@@ -11335,13 +11282,10 @@ F: Documentation/hwmon/it87.rst
F: drivers/hwmon/it87.c
IT913X MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/tuners/it913x*
ITE IT66121 HDMI BRIDGE DRIVER
@@ -11523,7 +11467,6 @@ F: fs/autofs/
KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
M: Masahiro Yamada <[email protected]>
R: Nathan Chancellor <[email protected]>
-R: Nick Desaulniers <[email protected]>
R: Nicolas Schier <[email protected]>
S: Maintained
@@ -12215,6 +12158,13 @@ F: include/linux/nd.h
F: include/uapi/linux/ndctl.h
F: tools/testing/nvdimm/
+LIBRARY CODE
+M: Andrew Morton <[email protected]>
+S: Supported
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-nonmm-unstable
+F: lib/*
+
LICENSES and SPDX stuff
M: Thomas Gleixner <[email protected]>
M: Greg Kroah-Hartman <[email protected]>
@@ -12694,13 +12644,10 @@ W: http://www.tazenda.demon.co.uk/phil/linux-hp
F: arch/m68k/hp300/
M88DS3103 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/dvb-frontends/m88ds3103*
M88RS2000 MEDIA DRIVER
@@ -14594,20 +14541,16 @@ F: include/asm-generic/tlb.h
F: mm/mmu_gather.c
MN88472 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
F: drivers/media/dvb-frontends/mn88472*
MN88473 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
F: drivers/media/dvb-frontends/mn88473*
@@ -14695,23 +14638,17 @@ S: Orphan
F: drivers/platform/x86/msi-wmi.c
MSI001 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/tuners/msi001*
MSI2500 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/usb/msi2500/
MSTAR INTERRUPT CONTROLLER DRIVER
@@ -15075,6 +15012,7 @@ F: lib/random32.c
F: net/
F: tools/net/
F: tools/testing/selftests/net/
+X: net/9p/
X: net/bluetooth/
NETWORKING [IPSEC]
@@ -17782,13 +17720,10 @@ F: drivers/bus/fsl-mc/
F: include/uapi/linux/fsl_mc.h
QT1010 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/tuners/qt1010*
QUALCOMM ATH12K WIRELESS DRIVER
@@ -17955,6 +17890,8 @@ L: [email protected]
S: Maintained
F: drivers/iommu/arm/arm-smmu/qcom_iommu.c
+F: drivers/iommu/arm/arm-smmu/arm-smmu-qcom*
+F: drivers/iommu/msm_iommu*
QUALCOMM IPC ROUTER (QRTR) DRIVER
M: Manivannan Sadhasivam <[email protected]>
@@ -18841,33 +18778,24 @@ S: Maintained
F: drivers/tty/rpmsg_tty.c
RTL2830 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/dvb-frontends/rtl2830*
RTL2832 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/dvb-frontends/rtl2832*
RTL2832_SDR MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/dvb-frontends/rtl2832_sdr*
RTL8180 WIRELESS DRIVER
@@ -19677,13 +19605,10 @@ F: drivers/media/platform/renesas/sh_vou.c
F: include/media/drv-intf/sh_vou.h
SI2157 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/tuners/si2157*
SI2165 MEDIA DRIVER
@@ -19695,13 +19620,10 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
F: drivers/media/dvb-frontends/si2165*
SI2168 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/dvb-frontends/si2168*
SI470X FM RADIO RECEIVER I2C DRIVER
@@ -21203,33 +21125,24 @@ W: http://tcp-lp-mod.sourceforge.net/
F: net/ipv4/tcp_lp.c
TDA10071 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/dvb-frontends/tda10071*
TDA18212 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/tuners/tda18212*
TDA18218 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/tuners/tda18218*
TDA18250 MEDIA DRIVER
@@ -22165,13 +22078,10 @@ F: include/uapi/linux/serial_core.h
F: include/uapi/linux/tty.h
TUA9001 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org
-W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/tuners/tua9001*
TULIP NETWORK DRIVERS
@@ -24116,20 +24026,16 @@ S: Orphan
F: drivers/net/wireless/zydas/zd1211rw/
ZD1301 MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org/
-W: http://palosaari.fi/linux/
Q: https://patchwork.linuxtv.org/project/linux-media/list/
F: drivers/media/usb/dvb-usb-v2/zd1301*
ZD1301_DEMOD MEDIA DRIVER
-M: Antti Palosaari <[email protected]>
-S: Maintained
+S: Orphan
W: https://linuxtv.org/
-W: http://palosaari.fi/linux/
Q: https://patchwork.linuxtv.org/project/linux-media/list/
F: drivers/media/dvb-frontends/zd1301_demod*
diff --git a/Makefile b/Makefile
index 99db546fbb45..70fc4c11dfc0 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 7
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc5
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/broadcom/bcm2711-rpi-400.dts b/arch/arm/boot/dts/broadcom/bcm2711-rpi-400.dts
index 1ab8184302db..5a2869a18bd5 100644
--- a/arch/arm/boot/dts/broadcom/bcm2711-rpi-400.dts
+++ b/arch/arm/boot/dts/broadcom/bcm2711-rpi-400.dts
@@ -36,9 +36,7 @@
gpios = <&gpio 42 GPIO_ACTIVE_HIGH>;
};
-&leds {
- /delete-node/ led_act;
-};
+/delete-node/ &led_act;
&pm {
/delete-property/ system-power-controller;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-skov-reve-mi1010ait-1cp1.dts b/arch/arm/boot/dts/nxp/imx/imx6q-skov-reve-mi1010ait-1cp1.dts
index a3f247c722b4..0342a79ccd5d 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6q-skov-reve-mi1010ait-1cp1.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6q-skov-reve-mi1010ait-1cp1.dts
@@ -37,9 +37,9 @@
&clks {
assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
- <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
+ <&clks IMX6QDL_CLK_LDB_DI1_SEL>, <&clks IMX6QDL_CLK_ENET_REF_SEL>;
assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
- <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>;
+ <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, <&clk50m_phy>;
};
&hdmi {
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-pico.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-pico.dtsi
index 4ffe99ed55ca..07dcecbe485d 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-pico.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-pico.dtsi
@@ -121,6 +121,8 @@
max-speed = <100>;
interrupt-parent = <&gpio5>;
interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&clks IMX6UL_CLK_ENET_REF>;
+ clock-names = "rmii-ref";
};
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx7s.dtsi b/arch/arm/boot/dts/nxp/imx/imx7s.dtsi
index 29b8fd03567a..5387da8a2a0a 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7s.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx7s.dtsi
@@ -454,7 +454,7 @@
};
gpt1: timer@302d0000 {
- compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
+ compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
reg = <0x302d0000 0x10000>;
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX7D_GPT1_ROOT_CLK>,
@@ -463,7 +463,7 @@
};
gpt2: timer@302e0000 {
- compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
+ compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
reg = <0x302e0000 0x10000>;
interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX7D_GPT2_ROOT_CLK>,
@@ -473,7 +473,7 @@
};
gpt3: timer@302f0000 {
- compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
+ compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
reg = <0x302f0000 0x10000>;
interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX7D_GPT3_ROOT_CLK>,
@@ -483,7 +483,7 @@
};
gpt4: timer@30300000 {
- compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
+ compatible = "fsl,imx7d-gpt", "fsl,imx6dl-gpt";
reg = <0x30300000 0x10000>;
interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX7D_GPT4_ROOT_CLK>,
diff --git a/arch/arm/boot/dts/nxp/mxs/imx28-xea.dts b/arch/arm/boot/dts/nxp/mxs/imx28-xea.dts
index a400c108f66a..6c5e6856648a 100644
--- a/arch/arm/boot/dts/nxp/mxs/imx28-xea.dts
+++ b/arch/arm/boot/dts/nxp/mxs/imx28-xea.dts
@@ -8,6 +8,7 @@
#include "imx28-lwe.dtsi"
/ {
+ model = "Liebherr XEA board";
compatible = "lwn,imx28-xea", "fsl,imx28";
};
diff --git a/arch/arm/boot/dts/rockchip/rk3128.dtsi b/arch/arm/boot/dts/rockchip/rk3128.dtsi
index 7bf557c99561..01edf244ddee 100644
--- a/arch/arm/boot/dts/rockchip/rk3128.dtsi
+++ b/arch/arm/boot/dts/rockchip/rk3128.dtsi
@@ -848,7 +848,7 @@
};
sdmmc_pwren: sdmmc-pwren {
- rockchip,pins = <1 RK_PB6 1 &pcfg_pull_default>;
+ rockchip,pins = <1 RK_PB6 RK_FUNC_GPIO &pcfg_pull_default>;
};
sdmmc_bus4: sdmmc-bus4 {
diff --git a/arch/arm/boot/dts/rockchip/rk322x.dtsi b/arch/arm/boot/dts/rockchip/rk322x.dtsi
index ffc16d6b97e1..a721744cbfd1 100644
--- a/arch/arm/boot/dts/rockchip/rk322x.dtsi
+++ b/arch/arm/boot/dts/rockchip/rk322x.dtsi
@@ -215,9 +215,9 @@
power-domain@RK3228_PD_VOP {
reg = <RK3228_PD_VOP>;
- clocks =<&cru ACLK_VOP>,
- <&cru DCLK_VOP>,
- <&cru HCLK_VOP>;
+ clocks = <&cru ACLK_VOP>,
+ <&cru DCLK_VOP>,
+ <&cru HCLK_VOP>;
pm_qos = <&qos_vop>;
#power-domain-cells = <0>;
};
diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h
index e62832dcba76..a8287e7ab9d4 100644
--- a/arch/arm/include/asm/kexec.h
+++ b/arch/arm/include/asm/kexec.h
@@ -2,8 +2,6 @@
#ifndef _ARM_KEXEC_H
#define _ARM_KEXEC_H
-#ifdef CONFIG_KEXEC
-
/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
@@ -82,6 +80,4 @@ static inline struct page *boot_pfn_to_page(unsigned long boot_pfn)
#endif /* __ASSEMBLY__ */
-#endif /* CONFIG_KEXEC */
-
#endif /* _ARM_KEXEC_H */
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index d53f56d6f840..771264d4726a 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -59,7 +59,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += entry-ftrace.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o insn.o patch.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o insn.o patch.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o insn.o patch.o
-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o
# Main staffs in KPROBES are in arch/arm/probes/ .
obj-$(CONFIG_KPROBES) += patch.o insn.o
obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
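The kexec.h and Makefile hunks above belong together: after the kexec Kconfig refactor, the core machinery (machine_kexec, relocate_kernel) is owned by CONFIG_KEXEC_CORE, which CONFIG_KEXEC merely selects, so both the build rule and the header guard had to stop keying off CONFIG_KEXEC alone. A minimal sketch of the intended guard shape (illustrative, not the exact header):

    struct kimage;

    /* Shared kexec machinery keys off KEXEC_CORE; CONFIG_KEXEC now only
     * covers the legacy kexec_load() front end that selects it. */
    #if IS_ENABLED(CONFIG_KEXEC_CORE)
    extern void machine_kexec(struct kimage *image);
    #endif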
diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c
index 2157493b78a9..df69af932375 100644
--- a/arch/arm/mach-imx/mmdc.c
+++ b/arch/arm/mach-imx/mmdc.c
@@ -501,6 +501,10 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
name = devm_kasprintf(&pdev->dev,
GFP_KERNEL, "mmdc%d", ret);
+ if (!name) {
+ ret = -ENOMEM;
+ goto pmu_release_id;
+ }
pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
pmu_mmdc->devtype_data = (struct fsl_mmdc_devtype_data *)of_id->data;
@@ -523,9 +527,10 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
pmu_register_err:
pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret);
- ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
hrtimer_cancel(&pmu_mmdc->hrtimer);
+pmu_release_id:
+ ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
pmu_free:
kfree(pmu_mmdc);
return ret;
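The relabelled error path above encodes a general rule: unwind in reverse order of acquisition, and jump to the label that undoes exactly what existed before the failing step. With ida_simple_remove() moved below the new pmu_release_id: label, the added devm_kasprintf() failure check frees the id without touching the not-yet-registered hotplug instance or hrtimer. The same shape, reduced to a sketch with hypothetical names:

    static DEFINE_IDA(example_ida);

    static int example_probe(struct device *dev)
    {
        char *name;
        int id, ret;

        id = ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);
        if (id < 0)
            return id;

        name = devm_kasprintf(dev, GFP_KERNEL, "example%d", id);
        if (!name) {
            ret = -ENOMEM;
            goto release_id;    /* only the id exists at this point */
        }

        ret = example_register(name);
        if (ret)
            goto release_id;

        return 0;

    release_id:
        ida_simple_remove(&example_ida, id);
        return ret;
    }

Here example_register() stands in for the registration step that can still fail after the allocation.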
diff --git a/arch/arm64/boot/dts/freescale/imx8-apalis-v1.1.dtsi b/arch/arm64/boot/dts/freescale/imx8-apalis-v1.1.dtsi
index 5ce5fbf2b38e..f69b0c17560a 100644
--- a/arch/arm64/boot/dts/freescale/imx8-apalis-v1.1.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-apalis-v1.1.dtsi
@@ -82,12 +82,9 @@
pinctrl-0 = <&pinctrl_wifi_pdn>;
gpio = <&lsio_gpio1 28 GPIO_ACTIVE_HIGH>;
enable-active-high;
+ regulator-always-on;
regulator-name = "wifi_pwrdn_fake_regulator";
regulator-settling-time-us = <100>;
-
- regulator-state-mem {
- regulator-off-in-suspend;
- };
};
reg_pcie_switch: regulator-pcie-switch {
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi
index ce66d30a4839..b0bb77150adc 100644
--- a/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi
@@ -149,7 +149,7 @@ dma_subsys: bus@5a000000 {
clock-names = "ipg", "per";
assigned-clocks = <&clk IMX_SC_R_LCD_0_PWM_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
- #pwm-cells = <2>;
+ #pwm-cells = <3>;
power-domains = <&pd IMX_SC_R_LCD_0_PWM_0>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
index 49ad3413db94..7e510b21bbac 100644
--- a/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
@@ -29,7 +29,7 @@ lsio_subsys: bus@5d000000 {
<&pwm0_lpcg 1>;
assigned-clocks = <&clk IMX_SC_R_PWM_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
- #pwm-cells = <2>;
+ #pwm-cells = <3>;
interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@@ -42,7 +42,7 @@ lsio_subsys: bus@5d000000 {
<&pwm1_lpcg 1>;
assigned-clocks = <&clk IMX_SC_R_PWM_1 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
- #pwm-cells = <2>;
+ #pwm-cells = <3>;
interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@@ -55,7 +55,7 @@ lsio_subsys: bus@5d000000 {
<&pwm2_lpcg 1>;
assigned-clocks = <&clk IMX_SC_R_PWM_2 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
- #pwm-cells = <2>;
+ #pwm-cells = <3>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@@ -68,7 +68,7 @@ lsio_subsys: bus@5d000000 {
<&pwm3_lpcg 1>;
assigned-clocks = <&clk IMX_SC_R_PWM_3 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <24000000>;
- #pwm-cells = <2>;
+ #pwm-cells = <3>;
interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
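All five i.MX8 PWM nodes above move to three-cell specifiers, so clients can pass flags in the third cell, e.g. pwms = <&adma_pwm 0 100000 PWM_POLARITY_INVERTED>. Consumers using the standard API pick the extra cell up automatically; a sketch of such a consumer (device and connection names assumed, not taken from these DTs):

    #include <linux/err.h>
    #include <linux/pwm.h>

    static int example_backlight_setup(struct device *dev)
    {
        struct pwm_device *pwm;
        struct pwm_state state;

        pwm = devm_pwm_get(dev, NULL);
        if (IS_ERR(pwm))
            return PTR_ERR(pwm);

        /* Folds the DT-provided period and polarity flags into state. */
        pwm_init_state(pwm, &state);
        state.duty_cycle = state.period / 2;
        state.enabled = true;

        return pwm_apply_state(pwm, &state);
    }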
diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
index c9a610ba4836..1264da6012f9 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
@@ -2072,6 +2072,7 @@
phys = <&usb3_phy0>, <&usb3_phy0>;
phy-names = "usb2-phy", "usb3-phy";
snps,gfladj-refclk-lpm-sel-quirk;
+ snps,parkmode-disable-ss-quirk;
};
};
@@ -2114,6 +2115,7 @@
phys = <&usb3_phy1>, <&usb3_phy1>;
phy-names = "usb2-phy", "usb3-phy";
snps,gfladj-refclk-lpm-sel-quirk;
+ snps,parkmode-disable-ss-quirk;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index 4b1ce9fc1758..c6dc3ba0d43b 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -1649,6 +1649,7 @@
phys = <&usb3_phy0>, <&usb3_phy0>;
phy-names = "usb2-phy", "usb3-phy";
power-domains = <&pgc_otg1>;
+ snps,parkmode-disable-ss-quirk;
status = "disabled";
};
@@ -1680,6 +1681,7 @@
phys = <&usb3_phy1>, <&usb3_phy1>;
phy-names = "usb2-phy", "usb3-phy";
power-domains = <&pgc_otg2>;
+ snps,parkmode-disable-ss-quirk;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi
index 01539df335f8..8439dd6b3935 100644
--- a/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi
@@ -96,6 +96,17 @@
status = "okay";
};
+&edma3 {
+ power-domains = <&pd IMX_SC_R_DMA_1_CH0>,
+ <&pd IMX_SC_R_DMA_1_CH1>,
+ <&pd IMX_SC_R_DMA_1_CH2>,
+ <&pd IMX_SC_R_DMA_1_CH3>,
+ <&pd IMX_SC_R_DMA_1_CH4>,
+ <&pd IMX_SC_R_DMA_1_CH5>,
+ <&pd IMX_SC_R_DMA_1_CH6>,
+ <&pd IMX_SC_R_DMA_1_CH7>;
+};
+
&flexcan1 {
fsl,clk-source = /bits/ 8 <1>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8ulp.dtsi b/arch/arm64/boot/dts/freescale/imx8ulp.dtsi
index f22c1ac391c9..c4a0082f30d3 100644
--- a/arch/arm64/boot/dts/freescale/imx8ulp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8ulp.dtsi
@@ -483,7 +483,7 @@
};
};
- gpioe: gpio@2d000080 {
+ gpioe: gpio@2d000000 {
compatible = "fsl,imx8ulp-gpio";
reg = <0x2d000000 0x1000>;
gpio-controller;
@@ -498,7 +498,7 @@
gpio-ranges = <&iomuxc1 0 32 24>;
};
- gpiof: gpio@2d010080 {
+ gpiof: gpio@2d010000 {
compatible = "fsl,imx8ulp-gpio";
reg = <0x2d010000 0x1000>;
gpio-controller;
@@ -534,7 +534,7 @@
};
};
- gpiod: gpio@2e200080 {
+ gpiod: gpio@2e200000 {
compatible = "fsl,imx8ulp-gpio";
reg = <0x2e200000 0x1000>;
gpio-controller;
diff --git a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
index f06139bdff97..3c5c67ebee5d 100644
--- a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
+++ b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
@@ -577,7 +577,7 @@
fsl,pins = <
MX93_PAD_UART2_TXD__LPUART2_TX 0x31e
MX93_PAD_UART2_RXD__LPUART2_RX 0x31e
- MX93_PAD_SAI1_TXD0__LPUART2_RTS_B 0x31e
+ MX93_PAD_SAI1_TXD0__LPUART2_RTS_B 0x51e
>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx93.dtsi b/arch/arm64/boot/dts/freescale/imx93.dtsi
index ceccf4766440..34c0540276d1 100644
--- a/arch/arm64/boot/dts/freescale/imx93.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx93.dtsi
@@ -417,7 +417,7 @@
compatible = "fsl,imx93-src-slice";
reg = <0x44462400 0x400>, <0x44465800 0x400>;
#power-domain-cells = <0>;
- clocks = <&clk IMX93_CLK_MEDIA_AXI>,
+ clocks = <&clk IMX93_CLK_NIC_MEDIA_GATE>,
<&clk IMX93_CLK_MEDIA_APB>;
};
};
@@ -957,7 +957,7 @@
};
};
- gpio2: gpio@43810080 {
+ gpio2: gpio@43810000 {
compatible = "fsl,imx93-gpio", "fsl,imx8ulp-gpio";
reg = <0x43810000 0x1000>;
gpio-controller;
@@ -972,7 +972,7 @@
gpio-ranges = <&iomuxc 0 4 30>;
};
- gpio3: gpio@43820080 {
+ gpio3: gpio@43820000 {
compatible = "fsl,imx93-gpio", "fsl,imx8ulp-gpio";
reg = <0x43820000 0x1000>;
gpio-controller;
@@ -988,7 +988,7 @@
<&iomuxc 26 34 2>, <&iomuxc 28 0 4>;
};
- gpio4: gpio@43830080 {
+ gpio4: gpio@43830000 {
compatible = "fsl,imx93-gpio", "fsl,imx8ulp-gpio";
reg = <0x43830000 0x1000>;
gpio-controller;
@@ -1003,7 +1003,7 @@
gpio-ranges = <&iomuxc 0 38 28>, <&iomuxc 28 36 2>;
};
- gpio1: gpio@47400080 {
+ gpio1: gpio@47400000 {
compatible = "fsl,imx93-gpio", "fsl,imx8ulp-gpio";
reg = <0x47400000 0x1000>;
gpio-controller;
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
index 3b7a176b7904..c46682150e50 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
@@ -73,7 +73,7 @@
};
};
- memory {
+ memory@40000000 {
reg = <0 0x40000000 0 0x40000000>;
};
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
index a885a3fbe456..2dc1bdc74e21 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
@@ -55,7 +55,7 @@
};
};
- memory {
+ memory@40000000 {
reg = <0 0x40000000 0 0x20000000>;
};
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts
index af4a4309bda4..b876e501216b 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts
@@ -126,6 +126,7 @@
compatible = "sff,sfp";
i2c-bus = <&i2c_sfp1>;
los-gpios = <&pio 46 GPIO_ACTIVE_HIGH>;
+ maximum-power-milliwatt = <3000>;
mod-def0-gpios = <&pio 49 GPIO_ACTIVE_LOW>;
tx-disable-gpios = <&pio 20 GPIO_ACTIVE_HIGH>;
tx-fault-gpios = <&pio 7 GPIO_ACTIVE_HIGH>;
@@ -137,6 +138,7 @@
i2c-bus = <&i2c_sfp2>;
los-gpios = <&pio 31 GPIO_ACTIVE_HIGH>;
mod-def0-gpios = <&pio 47 GPIO_ACTIVE_LOW>;
+ maximum-power-milliwatt = <3000>;
tx-disable-gpios = <&pio 15 GPIO_ACTIVE_HIGH>;
tx-fault-gpios = <&pio 48 GPIO_ACTIVE_HIGH>;
};
@@ -150,16 +152,16 @@
trip = <&cpu_trip_active_high>;
};
- cpu-active-low {
+ cpu-active-med {
/* active: set fan to cooling level 1 */
cooling-device = <&fan 1 1>;
- trip = <&cpu_trip_active_low>;
+ trip = <&cpu_trip_active_med>;
};
- cpu-passive {
- /* passive: set fan to cooling level 0 */
+ cpu-active-low {
+ /* active: set fan to cooling level 0 */
cooling-device = <&fan 0 0>;
- trip = <&cpu_trip_passive>;
+ trip = <&cpu_trip_active_low>;
};
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
index 24eda00e320d..fc751e049953 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
@@ -374,6 +374,10 @@
reg = <0 0x11230000 0 0x1000>,
<0 0x11c20000 0 0x1000>;
interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+ assigned-clocks = <&topckgen CLK_TOP_EMMC_416M_SEL>,
+ <&topckgen CLK_TOP_EMMC_250M_SEL>;
+ assigned-clock-parents = <&apmixedsys CLK_APMIXED_MPLL>,
+ <&topckgen CLK_TOP_NET1PLL_D5_D2>;
clocks = <&topckgen CLK_TOP_EMMC_416M_SEL>,
<&infracfg CLK_INFRA_MSDC_HCK_CK>,
<&infracfg CLK_INFRA_MSDC_CK>,
@@ -610,22 +614,34 @@
thermal-sensors = <&thermal 0>;
trips {
+ cpu_trip_crit: crit {
+ temperature = <125000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+
+ cpu_trip_hot: hot {
+ temperature = <120000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
cpu_trip_active_high: active-high {
temperature = <115000>;
hysteresis = <2000>;
type = "active";
};
- cpu_trip_active_low: active-low {
+ cpu_trip_active_med: active-med {
temperature = <85000>;
hysteresis = <2000>;
type = "active";
};
- cpu_trip_passive: passive {
- temperature = <40000>;
+ cpu_trip_active_low: active-low {
+ temperature = <60000>;
hysteresis = <2000>;
- type = "passive";
+ type = "active";
};
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
index 5122963d8743..d258c80213b2 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
@@ -44,7 +44,7 @@
id-gpio = <&pio 16 GPIO_ACTIVE_HIGH>;
};
- usb_p1_vbus: regulator@0 {
+ usb_p1_vbus: regulator-usb-p1 {
compatible = "regulator-fixed";
regulator-name = "usb_vbus";
regulator-min-microvolt = <5000000>;
@@ -53,7 +53,7 @@
enable-active-high;
};
- usb_p0_vbus: regulator@1 {
+ usb_p0_vbus: regulator-usb-p0 {
compatible = "regulator-fixed";
regulator-name = "vbus";
regulator-min-microvolt = <5000000>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-evb.dts b/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
index ce336a48c897..77f9ab94c00b 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
@@ -31,14 +31,14 @@
#address-cells = <2>;
#size-cells = <2>;
ranges;
- scp_mem_reserved: scp_mem_region {
+ scp_mem_reserved: memory@50000000 {
compatible = "shared-dma-pool";
reg = <0 0x50000000 0 0x2900000>;
no-map;
};
};
- ntc@0 {
+ thermal-sensor {
compatible = "murata,ncp03wf104";
pullup-uv = <1800000>;
pullup-ohm = <390000>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
index bf97b60ae4d1..820260348de9 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
@@ -91,6 +91,8 @@
&dsi0 {
status = "okay";
+ /delete-property/#size-cells;
+ /delete-property/#address-cells;
/delete-node/panel@0;
ports {
port {
@@ -441,20 +443,20 @@
};
touchscreen_pins: touchscreen-pins {
- touch_int_odl {
+ touch-int-odl {
pinmux = <PINMUX_GPIO155__FUNC_GPIO155>;
input-enable;
bias-pull-up;
};
- touch_rst_l {
+ touch-rst-l {
pinmux = <PINMUX_GPIO156__FUNC_GPIO156>;
output-high;
};
};
trackpad_pins: trackpad-pins {
- trackpad_int {
+ trackpad-int {
pinmux = <PINMUX_GPIO7__FUNC_GPIO7>;
input-enable;
bias-disable; /* pulled externally */
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
index bf7de35ffcbc..7881a27be029 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
@@ -116,7 +116,7 @@
#size-cells = <2>;
ranges;
- scp_mem_reserved: scp_mem_region {
+ scp_mem_reserved: memory@50000000 {
compatible = "shared-dma-pool";
reg = <0 0x50000000 0 0x2900000>;
no-map;
@@ -460,7 +460,7 @@
&pio {
aud_pins_default: audiopins {
- pins_bus {
+ pins-bus {
pinmux = <PINMUX_GPIO97__FUNC_I2S2_MCK>,
<PINMUX_GPIO98__FUNC_I2S2_BCK>,
<PINMUX_GPIO101__FUNC_I2S2_LRCK>,
@@ -482,7 +482,7 @@
};
aud_pins_tdm_out_on: audiotdmouton {
- pins_bus {
+ pins-bus {
pinmux = <PINMUX_GPIO169__FUNC_TDM_BCK_2ND>,
<PINMUX_GPIO170__FUNC_TDM_LRCK_2ND>,
<PINMUX_GPIO171__FUNC_TDM_DATA0_2ND>,
@@ -494,7 +494,7 @@
};
aud_pins_tdm_out_off: audiotdmoutoff {
- pins_bus {
+ pins-bus {
pinmux = <PINMUX_GPIO169__FUNC_GPIO169>,
<PINMUX_GPIO170__FUNC_GPIO170>,
<PINMUX_GPIO171__FUNC_GPIO171>,
@@ -508,13 +508,13 @@
};
bt_pins: bt-pins {
- pins_bt_en {
+ pins-bt-en {
pinmux = <PINMUX_GPIO120__FUNC_GPIO120>;
output-low;
};
};
- ec_ap_int_odl: ec_ap_int_odl {
+ ec_ap_int_odl: ec-ap-int-odl {
pins1 {
pinmux = <PINMUX_GPIO151__FUNC_GPIO151>;
input-enable;
@@ -522,7 +522,7 @@
};
};
- h1_int_od_l: h1_int_od_l {
+ h1_int_od_l: h1-int-od-l {
pins1 {
pinmux = <PINMUX_GPIO153__FUNC_GPIO153>;
input-enable;
@@ -530,7 +530,7 @@
};
i2c0_pins: i2c0 {
- pins_bus {
+ pins-bus {
pinmux = <PINMUX_GPIO82__FUNC_SDA0>,
<PINMUX_GPIO83__FUNC_SCL0>;
mediatek,pull-up-adv = <3>;
@@ -539,7 +539,7 @@
};
i2c1_pins: i2c1 {
- pins_bus {
+ pins-bus {
pinmux = <PINMUX_GPIO81__FUNC_SDA1>,
<PINMUX_GPIO84__FUNC_SCL1>;
mediatek,pull-up-adv = <3>;
@@ -548,7 +548,7 @@
};
i2c2_pins: i2c2 {
- pins_bus {
+ pins-bus {
pinmux = <PINMUX_GPIO103__FUNC_SCL2>,
<PINMUX_GPIO104__FUNC_SDA2>;
bias-disable;
@@ -557,7 +557,7 @@
};
i2c3_pins: i2c3 {
- pins_bus {
+ pins-bus {
pinmux = <PINMUX_GPIO50__FUNC_SCL3>,
<PINMUX_GPIO51__FUNC_SDA3>;
mediatek,pull-up-adv = <3>;
@@ -566,7 +566,7 @@
};
i2c4_pins: i2c4 {
- pins_bus {
+ pins-bus {
pinmux = <PINMUX_GPIO105__FUNC_SCL4>,
<PINMUX_GPIO106__FUNC_SDA4>;
bias-disable;
@@ -575,7 +575,7 @@
};
i2c5_pins: i2c5 {
- pins_bus {
+ pins-bus {
pinmux = <PINMUX_GPIO48__FUNC_SCL5>,
<PINMUX_GPIO49__FUNC_SDA5>;
mediatek,pull-up-adv = <3>;
@@ -584,7 +584,7 @@
};
i2c6_pins: i2c6 {
- pins_bus {
+ pins-bus {
pinmux = <PINMUX_GPIO11__FUNC_SCL6>,
<PINMUX_GPIO12__FUNC_SDA6>;
bias-disable;
@@ -592,7 +592,7 @@
};
mmc0_pins_default: mmc0-pins-default {
- pins_cmd_dat {
+ pins-cmd-dat {
pinmux = <PINMUX_GPIO123__FUNC_MSDC0_DAT0>,
<PINMUX_GPIO128__FUNC_MSDC0_DAT1>,
<PINMUX_GPIO125__FUNC_MSDC0_DAT2>,
@@ -607,13 +607,13 @@
mediatek,pull-up-adv = <01>;
};
- pins_clk {
+ pins-clk {
pinmux = <PINMUX_GPIO124__FUNC_MSDC0_CLK>;
drive-strength = <MTK_DRIVE_14mA>;
mediatek,pull-down-adv = <10>;
};
- pins_rst {
+ pins-rst {
pinmux = <PINMUX_GPIO133__FUNC_MSDC0_RSTB>;
drive-strength = <MTK_DRIVE_14mA>;
mediatek,pull-down-adv = <01>;
@@ -621,7 +621,7 @@
};
mmc0_pins_uhs: mmc0-pins-uhs {
- pins_cmd_dat {
+ pins-cmd-dat {
pinmux = <PINMUX_GPIO123__FUNC_MSDC0_DAT0>,
<PINMUX_GPIO128__FUNC_MSDC0_DAT1>,
<PINMUX_GPIO125__FUNC_MSDC0_DAT2>,
@@ -636,19 +636,19 @@
mediatek,pull-up-adv = <01>;
};
- pins_clk {
+ pins-clk {
pinmux = <PINMUX_GPIO124__FUNC_MSDC0_CLK>;
drive-strength = <MTK_DRIVE_14mA>;
mediatek,pull-down-adv = <10>;
};
- pins_ds {
+ pins-ds {
pinmux = <PINMUX_GPIO131__FUNC_MSDC0_DSL>;
drive-strength = <MTK_DRIVE_14mA>;
mediatek,pull-down-adv = <10>;
};
- pins_rst {
+ pins-rst {
pinmux = <PINMUX_GPIO133__FUNC_MSDC0_RSTB>;
drive-strength = <MTK_DRIVE_14mA>;
mediatek,pull-up-adv = <01>;
@@ -656,7 +656,7 @@
};
mmc1_pins_default: mmc1-pins-default {
- pins_cmd_dat {
+ pins-cmd-dat {
pinmux = <PINMUX_GPIO31__FUNC_MSDC1_CMD>,
<PINMUX_GPIO32__FUNC_MSDC1_DAT0>,
<PINMUX_GPIO34__FUNC_MSDC1_DAT1>,
@@ -666,7 +666,7 @@
mediatek,pull-up-adv = <10>;
};
- pins_clk {
+ pins-clk {
pinmux = <PINMUX_GPIO29__FUNC_MSDC1_CLK>;
input-enable;
mediatek,pull-down-adv = <10>;
@@ -674,7 +674,7 @@
};
mmc1_pins_uhs: mmc1-pins-uhs {
- pins_cmd_dat {
+ pins-cmd-dat {
pinmux = <PINMUX_GPIO31__FUNC_MSDC1_CMD>,
<PINMUX_GPIO32__FUNC_MSDC1_DAT0>,
<PINMUX_GPIO34__FUNC_MSDC1_DAT1>,
@@ -685,7 +685,7 @@
mediatek,pull-up-adv = <10>;
};
- pins_clk {
+ pins-clk {
pinmux = <PINMUX_GPIO29__FUNC_MSDC1_CLK>;
drive-strength = <MTK_DRIVE_8mA>;
mediatek,pull-down-adv = <10>;
@@ -693,15 +693,15 @@
};
};
- panel_pins_default: panel_pins_default {
- panel_reset {
+ panel_pins_default: panel-pins-default {
+ panel-reset {
pinmux = <PINMUX_GPIO45__FUNC_GPIO45>;
output-low;
bias-pull-up;
};
};
- pwm0_pin_default: pwm0_pin_default {
+ pwm0_pin_default: pwm0-pin-default {
pins1 {
pinmux = <PINMUX_GPIO176__FUNC_GPIO176>;
output-high;
@@ -713,14 +713,14 @@
};
scp_pins: scp {
- pins_scp_uart {
+ pins-scp-uart {
pinmux = <PINMUX_GPIO110__FUNC_TP_URXD1_AO>,
<PINMUX_GPIO112__FUNC_TP_UTXD1_AO>;
};
};
spi0_pins: spi0 {
- pins_spi {
+ pins-spi {
pinmux = <PINMUX_GPIO85__FUNC_SPI0_MI>,
<PINMUX_GPIO86__FUNC_GPIO86>,
<PINMUX_GPIO87__FUNC_SPI0_MO>,
@@ -730,7 +730,7 @@
};
spi1_pins: spi1 {
- pins_spi {
+ pins-spi {
pinmux = <PINMUX_GPIO161__FUNC_SPI1_A_MI>,
<PINMUX_GPIO162__FUNC_SPI1_A_CSB>,
<PINMUX_GPIO163__FUNC_SPI1_A_MO>,
@@ -740,20 +740,20 @@
};
spi2_pins: spi2 {
- pins_spi {
+ pins-spi {
pinmux = <PINMUX_GPIO0__FUNC_SPI2_CSB>,
<PINMUX_GPIO1__FUNC_SPI2_MO>,
<PINMUX_GPIO2__FUNC_SPI2_CLK>;
bias-disable;
};
- pins_spi_mi {
+ pins-spi-mi {
pinmux = <PINMUX_GPIO94__FUNC_SPI2_MI>;
mediatek,pull-down-adv = <00>;
};
};
spi3_pins: spi3 {
- pins_spi {
+ pins-spi {
pinmux = <PINMUX_GPIO21__FUNC_SPI3_MI>,
<PINMUX_GPIO22__FUNC_SPI3_CSB>,
<PINMUX_GPIO23__FUNC_SPI3_MO>,
@@ -763,7 +763,7 @@
};
spi4_pins: spi4 {
- pins_spi {
+ pins-spi {
pinmux = <PINMUX_GPIO17__FUNC_SPI4_MI>,
<PINMUX_GPIO18__FUNC_SPI4_CSB>,
<PINMUX_GPIO19__FUNC_SPI4_MO>,
@@ -773,7 +773,7 @@
};
spi5_pins: spi5 {
- pins_spi {
+ pins-spi {
pinmux = <PINMUX_GPIO13__FUNC_SPI5_MI>,
<PINMUX_GPIO14__FUNC_SPI5_CSB>,
<PINMUX_GPIO15__FUNC_SPI5_MO>,
@@ -783,63 +783,63 @@
};
uart0_pins_default: uart0-pins-default {
- pins_rx {
+ pins-rx {
pinmux = <PINMUX_GPIO95__FUNC_URXD0>;
input-enable;
bias-pull-up;
};
- pins_tx {
+ pins-tx {
pinmux = <PINMUX_GPIO96__FUNC_UTXD0>;
};
};
uart1_pins_default: uart1-pins-default {
- pins_rx {
+ pins-rx {
pinmux = <PINMUX_GPIO121__FUNC_URXD1>;
input-enable;
bias-pull-up;
};
- pins_tx {
+ pins-tx {
pinmux = <PINMUX_GPIO115__FUNC_UTXD1>;
};
- pins_rts {
+ pins-rts {
pinmux = <PINMUX_GPIO47__FUNC_URTS1>;
output-enable;
};
- pins_cts {
+ pins-cts {
pinmux = <PINMUX_GPIO46__FUNC_UCTS1>;
input-enable;
};
};
uart1_pins_sleep: uart1-pins-sleep {
- pins_rx {
+ pins-rx {
pinmux = <PINMUX_GPIO121__FUNC_GPIO121>;
input-enable;
bias-pull-up;
};
- pins_tx {
+ pins-tx {
pinmux = <PINMUX_GPIO115__FUNC_UTXD1>;
};
- pins_rts {
+ pins-rts {
pinmux = <PINMUX_GPIO47__FUNC_URTS1>;
output-enable;
};
- pins_cts {
+ pins-cts {
pinmux = <PINMUX_GPIO46__FUNC_UCTS1>;
input-enable;
};
};
wifi_pins_pwrseq: wifi-pins-pwrseq {
- pins_wifi_enable {
+ pins-wifi-enable {
pinmux = <PINMUX_GPIO119__FUNC_GPIO119>;
output-low;
};
};
wifi_pins_wakeup: wifi-pins-wakeup {
- pins_wifi_wakeup {
+ pins-wifi-wakeup {
pinmux = <PINMUX_GPIO113__FUNC_GPIO113>;
input-enable;
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
index 5169779d01df..976dc968b3ca 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
@@ -1210,127 +1210,6 @@
nvmem-cell-names = "calibration-data";
};
- thermal_zones: thermal-zones {
- cpu_thermal: cpu-thermal {
- polling-delay-passive = <100>;
- polling-delay = <500>;
- thermal-sensors = <&thermal 0>;
- sustainable-power = <5000>;
-
- trips {
- threshold: trip-point0 {
- temperature = <68000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- target: trip-point1 {
- temperature = <80000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- cpu_crit: cpu-crit {
- temperature = <115000>;
- hysteresis = <2000>;
- type = "critical";
- };
- };
-
- cooling-maps {
- map0 {
- trip = <&target>;
- cooling-device = <&cpu0
- THERMAL_NO_LIMIT
- THERMAL_NO_LIMIT>,
- <&cpu1
- THERMAL_NO_LIMIT
- THERMAL_NO_LIMIT>,
- <&cpu2
- THERMAL_NO_LIMIT
- THERMAL_NO_LIMIT>,
- <&cpu3
- THERMAL_NO_LIMIT
- THERMAL_NO_LIMIT>;
- contribution = <3072>;
- };
- map1 {
- trip = <&target>;
- cooling-device = <&cpu4
- THERMAL_NO_LIMIT
- THERMAL_NO_LIMIT>,
- <&cpu5
- THERMAL_NO_LIMIT
- THERMAL_NO_LIMIT>,
- <&cpu6
- THERMAL_NO_LIMIT
- THERMAL_NO_LIMIT>,
- <&cpu7
- THERMAL_NO_LIMIT
- THERMAL_NO_LIMIT>;
- contribution = <1024>;
- };
- };
- };
-
- /* The tzts1 ~ tzts6 don't need to polling */
- /* The tzts1 ~ tzts6 don't need to thermal throttle */
-
- tzts1: tzts1 {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-sensors = <&thermal 1>;
- sustainable-power = <5000>;
- trips {};
- cooling-maps {};
- };
-
- tzts2: tzts2 {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-sensors = <&thermal 2>;
- sustainable-power = <5000>;
- trips {};
- cooling-maps {};
- };
-
- tzts3: tzts3 {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-sensors = <&thermal 3>;
- sustainable-power = <5000>;
- trips {};
- cooling-maps {};
- };
-
- tzts4: tzts4 {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-sensors = <&thermal 4>;
- sustainable-power = <5000>;
- trips {};
- cooling-maps {};
- };
-
- tzts5: tzts5 {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-sensors = <&thermal 5>;
- sustainable-power = <5000>;
- trips {};
- cooling-maps {};
- };
-
- tztsABB: tztsABB {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-sensors = <&thermal 6>;
- sustainable-power = <5000>;
- trips {};
- cooling-maps {};
- };
- };
-
pwm0: pwm@1100e000 {
compatible = "mediatek,mt8183-disp-pwm";
reg = <0 0x1100e000 0 0x1000>;
@@ -2105,4 +1984,125 @@
power-domains = <&spm MT8183_POWER_DOMAIN_CAM>;
};
};
+
+ thermal_zones: thermal-zones {
+ cpu_thermal: cpu-thermal {
+ polling-delay-passive = <100>;
+ polling-delay = <500>;
+ thermal-sensors = <&thermal 0>;
+ sustainable-power = <5000>;
+
+ trips {
+ threshold: trip-point0 {
+ temperature = <68000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ target: trip-point1 {
+ temperature = <80000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit: cpu-crit {
+ temperature = <115000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&target>;
+ cooling-device = <&cpu0
+ THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>,
+ <&cpu1
+ THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>,
+ <&cpu2
+ THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>,
+ <&cpu3
+ THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>;
+ contribution = <3072>;
+ };
+ map1 {
+ trip = <&target>;
+ cooling-device = <&cpu4
+ THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>,
+ <&cpu5
+ THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>,
+ <&cpu6
+ THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>,
+ <&cpu7
+ THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>;
+ contribution = <1024>;
+ };
+ };
+ };
+
+ /* The tzts1 ~ tzts6 don't need to polling */
+ /* The tzts1 ~ tzts6 don't need to thermal throttle */
+
+ tzts1: tzts1 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&thermal 1>;
+ sustainable-power = <5000>;
+ trips {};
+ cooling-maps {};
+ };
+
+ tzts2: tzts2 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&thermal 2>;
+ sustainable-power = <5000>;
+ trips {};
+ cooling-maps {};
+ };
+
+ tzts3: tzts3 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&thermal 3>;
+ sustainable-power = <5000>;
+ trips {};
+ cooling-maps {};
+ };
+
+ tzts4: tzts4 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&thermal 4>;
+ sustainable-power = <5000>;
+ trips {};
+ cooling-maps {};
+ };
+
+ tzts5: tzts5 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&thermal 5>;
+ sustainable-power = <5000>;
+ trips {};
+ cooling-maps {};
+ };
+
+ tztsABB: tztsABB {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&thermal 6>;
+ sustainable-power = <5000>;
+ trips {};
+ cooling-maps {};
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8186.dtsi b/arch/arm64/boot/dts/mediatek/mt8186.dtsi
index f04ae70c470a..df0c04f2ba1d 100644
--- a/arch/arm64/boot/dts/mediatek/mt8186.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8186.dtsi
@@ -924,7 +924,8 @@
reg = <MT8186_POWER_DOMAIN_CSIRX_TOP>;
clocks = <&topckgen CLK_TOP_SENINF>,
<&topckgen CLK_TOP_SENINF1>;
- clock-names = "csirx_top0", "csirx_top1";
+ clock-names = "subsys-csirx-top0",
+ "subsys-csirx-top1";
#power-domain-cells = <0>;
};
@@ -942,7 +943,8 @@
reg = <MT8186_POWER_DOMAIN_ADSP_AO>;
clocks = <&topckgen CLK_TOP_AUDIODSP>,
<&topckgen CLK_TOP_ADSP_BUS>;
- clock-names = "audioadsp", "adsp_bus";
+ clock-names = "audioadsp",
+ "subsys-adsp-bus";
#address-cells = <1>;
#size-cells = <0>;
#power-domain-cells = <1>;
@@ -975,8 +977,11 @@
<&mmsys CLK_MM_SMI_COMMON>,
<&mmsys CLK_MM_SMI_GALS>,
<&mmsys CLK_MM_SMI_IOMMU>;
- clock-names = "disp", "mdp", "smi_infra", "smi_common",
- "smi_gals", "smi_iommu";
+ clock-names = "disp", "mdp",
+ "subsys-smi-infra",
+ "subsys-smi-common",
+ "subsys-smi-gals",
+ "subsys-smi-iommu";
mediatek,infracfg = <&infracfg_ao>;
#address-cells = <1>;
#size-cells = <0>;
@@ -993,15 +998,17 @@
power-domain@MT8186_POWER_DOMAIN_CAM {
reg = <MT8186_POWER_DOMAIN_CAM>;
- clocks = <&topckgen CLK_TOP_CAM>,
- <&topckgen CLK_TOP_SENINF>,
+ clocks = <&topckgen CLK_TOP_SENINF>,
<&topckgen CLK_TOP_SENINF1>,
<&topckgen CLK_TOP_SENINF2>,
<&topckgen CLK_TOP_SENINF3>,
+ <&camsys CLK_CAM2MM_GALS>,
<&topckgen CLK_TOP_CAMTM>,
- <&camsys CLK_CAM2MM_GALS>;
- clock-names = "cam-top", "cam0", "cam1", "cam2",
- "cam3", "cam-tm", "gals";
+ <&topckgen CLK_TOP_CAM>;
+ clock-names = "cam0", "cam1", "cam2",
+ "cam3", "gals",
+ "subsys-cam-tm",
+ "subsys-cam-top";
mediatek,infracfg = <&infracfg_ao>;
#address-cells = <1>;
#size-cells = <0>;
@@ -1020,9 +1027,9 @@
power-domain@MT8186_POWER_DOMAIN_IMG {
reg = <MT8186_POWER_DOMAIN_IMG>;
- clocks = <&topckgen CLK_TOP_IMG1>,
- <&imgsys1 CLK_IMG1_GALS_IMG1>;
- clock-names = "img-top", "gals";
+ clocks = <&imgsys1 CLK_IMG1_GALS_IMG1>,
+ <&topckgen CLK_TOP_IMG1>;
+ clock-names = "gals", "subsys-img-top";
mediatek,infracfg = <&infracfg_ao>;
#address-cells = <1>;
#size-cells = <0>;
@@ -1041,8 +1048,11 @@
<&ipesys CLK_IPE_LARB20>,
<&ipesys CLK_IPE_SMI_SUBCOM>,
<&ipesys CLK_IPE_GALS_IPE>;
- clock-names = "ipe-top", "ipe-larb0", "ipe-larb1",
- "ipe-smi", "ipe-gals";
+ clock-names = "subsys-ipe-top",
+ "subsys-ipe-larb0",
+ "subsys-ipe-larb1",
+ "subsys-ipe-smi",
+ "subsys-ipe-gals";
mediatek,infracfg = <&infracfg_ao>;
#power-domain-cells = <0>;
};
@@ -1061,7 +1071,9 @@
clocks = <&topckgen CLK_TOP_WPE>,
<&wpesys CLK_WPE_SMI_LARB8_CK_EN>,
<&wpesys CLK_WPE_SMI_LARB8_PCLK_EN>;
- clock-names = "wpe0", "larb-ck", "larb-pclk";
+ clock-names = "wpe0",
+ "subsys-larb-ck",
+ "subsys-larb-pclk";
mediatek,infracfg = <&infracfg_ao>;
#power-domain-cells = <0>;
};
@@ -1656,7 +1668,7 @@
#address-cells = <1>;
#size-cells = <1>;
- gpu_speedbin: gpu-speed-bin@59c {
+ gpu_speedbin: gpu-speedbin@59c {
reg = <0x59c 0x4>;
bits = <0 3>;
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
index dd5b89b73190..5a7cab489ff3 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
@@ -389,7 +389,7 @@
pinctrl-0 = <&i2c7_pins>;
pmic@34 {
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
compatible = "mediatek,mt6360";
reg = <0x34>;
interrupt-controller;
diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
index 54c674c45b49..e0ac2e9f5b72 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
@@ -627,6 +627,8 @@
power-domain@MT8195_POWER_DOMAIN_VENC_CORE1 {
reg = <MT8195_POWER_DOMAIN_VENC_CORE1>;
+ clocks = <&vencsys_core1 CLK_VENC_CORE1_LARB>;
+ clock-names = "venc1-larb";
mediatek,infracfg = <&infracfg_ao>;
#power-domain-cells = <0>;
};
@@ -689,6 +691,8 @@
power-domain@MT8195_POWER_DOMAIN_VENC {
reg = <MT8195_POWER_DOMAIN_VENC>;
+ clocks = <&vencsys CLK_VENC_LARB>;
+ clock-names = "venc0-larb";
mediatek,infracfg = <&infracfg_ao>;
#power-domain-cells = <0>;
};
@@ -2665,7 +2669,7 @@
reg = <0 0x1b010000 0 0x1000>;
mediatek,larb-id = <20>;
mediatek,smi = <&smi_common_vpp>;
- clocks = <&vencsys_core1 CLK_VENC_CORE1_LARB>,
+ clocks = <&vencsys_core1 CLK_VENC_CORE1_VENC>,
<&vencsys_core1 CLK_VENC_CORE1_GALS>,
<&vppsys0 CLK_VPP0_GALS_VDO0_VDO1_VENCSYS_CORE1>;
clock-names = "apb", "smi", "gals";
diff --git a/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts b/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
index de0a1f2af983..7d4c5324c61b 100644
--- a/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
+++ b/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
@@ -86,7 +86,7 @@
sgtl5000_clk: sgtl5000-oscillator {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <24576000>;
+ clock-frequency = <24576000>;
};
dc_12v: dc-12v-regulator {
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index e729e7a22b23..cc8209795c3e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -668,7 +668,7 @@
vdec: video-codec@ff360000 {
compatible = "rockchip,rk3328-vdec", "rockchip,rk3399-vdec";
- reg = <0x0 0xff360000 0x0 0x400>;
+ reg = <0x0 0xff360000 0x0 0x480>;
interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru ACLK_RKVDEC>, <&cru HCLK_RKVDEC>,
<&cru SCLK_VDEC_CABAC>, <&cru SCLK_VDEC_CORE>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
index 5c1929d41cc0..cacbad35cfc8 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
@@ -509,8 +509,7 @@ ap_i2c_tp: &i2c5 {
&pci_rootport {
mvl_wifi: wifi@0,0 {
compatible = "pci1b4b,2b42";
- reg = <0x83010000 0x0 0x00000000 0x0 0x00100000
- 0x83010000 0x0 0x00100000 0x0 0x00100000>;
+ reg = <0x0000 0x0 0x0 0x0 0x0>;
interrupt-parent = <&gpio0>;
interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-dumo.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-dumo.dts
index 853e88455e75..9e4b12ed62cb 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-dumo.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-dumo.dts
@@ -34,8 +34,8 @@
&pci_rootport {
wifi@0,0 {
compatible = "qcom,ath10k";
- reg = <0x00010000 0x0 0x00000000 0x0 0x00000000>,
- <0x03010010 0x0 0x00000000 0x0 0x00200000>;
+ reg = <0x00000000 0x0 0x00000000 0x0 0x00000000>,
+ <0x03000010 0x0 0x00000000 0x0 0x00200000>;
qcom,ath10k-calibration-variant = "GO_DUMO";
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
index c9bf1d5c3a42..789fd0dcc88b 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
@@ -489,6 +489,7 @@ ap_i2c_audio: &i2c8 {
#address-cells = <3>;
#size-cells = <2>;
ranges;
+ device_type = "pci";
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index faf02e59d6c7..da0dfb237f85 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -1109,7 +1109,9 @@
power-domain@RK3399_PD_VDU {
reg = <RK3399_PD_VDU>;
clocks = <&cru ACLK_VDU>,
- <&cru HCLK_VDU>;
+ <&cru HCLK_VDU>,
+ <&cru SCLK_VDU_CA>,
+ <&cru SCLK_VDU_CORE>;
pm_qos = <&qos_video_m1_r>,
<&qos_video_m1_w>;
#power-domain-cells = <0>;
@@ -1384,7 +1386,7 @@
vdec: video-codec@ff660000 {
compatible = "rockchip,rk3399-vdec";
- reg = <0x0 0xff660000 0x0 0x400>;
+ reg = <0x0 0xff660000 0x0 0x480>;
interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&cru ACLK_VDU>, <&cru HCLK_VDU>,
<&cru SCLK_VDU_CA>, <&cru SCLK_VDU_CORE>;
diff --git a/arch/arm64/boot/dts/rockchip/rk356x.dtsi b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
index 0964761e3ce9..c19c0f1b3778 100644
--- a/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
@@ -977,7 +977,7 @@
<GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "sys", "pmc", "msi", "legacy", "err";
+ interrupt-names = "sys", "pmc", "msg", "legacy", "err";
bus-range = <0x0 0xf>;
clocks = <&cru ACLK_PCIE20_MST>, <&cru ACLK_PCIE20_SLV>,
<&cru ACLK_PCIE20_DBI>, <&cru PCLK_PCIE20>,
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi
index 9570b34aca2e..d88c0e852356 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi
@@ -235,13 +235,13 @@
&pinctrl {
fan {
fan_int: fan-int {
- rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
hym8563 {
hym8563_int: hym8563-int {
- rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-orangepi-5.dts b/arch/arm64/boot/dts/rockchip/rk3588s-orangepi-5.dts
index 8f399c4317bd..e3a839a12dc6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-orangepi-5.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-orangepi-5.dts
@@ -38,7 +38,7 @@
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
- pinctrl-0 =<&leds_gpio>;
+ pinctrl-0 = <&leds_gpio>;
led-1 {
gpios = <&gpio1 RK_PA2 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-pinctrl.dtsi b/arch/arm64/boot/dts/rockchip/rk3588s-pinctrl.dtsi
index 63151d9d2377..30db12c4fc82 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-pinctrl.dtsi
@@ -369,7 +369,7 @@
emmc_data_strobe: emmc-data-strobe {
rockchip,pins =
/* emmc_data_strobe */
- <2 RK_PA2 1 &pcfg_pull_none>;
+ <2 RK_PA2 1 &pcfg_pull_down>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s.dtsi b/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
index 7064c0e9179f..8aa0499f9b03 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
@@ -1362,7 +1362,6 @@
<GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "ch0", "ch1", "ch2", "ch3";
rockchip,pmu = <&pmu1grf>;
};
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 646591c67e7a..91d2d6714969 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1839,6 +1839,10 @@ static int __init __kpti_install_ng_mappings(void *__unused)
static void __init kpti_install_ng_mappings(void)
{
+ /* Check whether KPTI is going to be used */
+ if (!cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0))
+ return;
+
/*
* We don't need to rewrite the page-tables if either we've done
* it already or we have KASLR enabled and therefore have not
diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
index 339a55194b2c..74a67ad87f29 100644
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -436,6 +436,10 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
if (ret)
goto out;
+ /* Silently exit if the vLPI is already mapped */
+ if (irq->hw)
+ goto out;
+
/*
* Emit the mapping request. If it fails, the ITS probably
* isn't v4 compatible, so let's silently bail out. Holding
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index 204b94b2e6aa..4ba8d67ddb09 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -83,7 +83,7 @@ endif
ifeq ($(CONFIG_RELOCATABLE),y)
KBUILD_CFLAGS_KERNEL += -fPIE
-LDFLAGS_vmlinux += -static -pie --no-dynamic-linker -z notext
+LDFLAGS_vmlinux += -static -pie --no-dynamic-linker -z notext $(call ld-option, --apply-dynamic-relocs)
endif
cflags-y += $(call cc-option, -mno-check-zero-division)
diff --git a/arch/loongarch/include/asm/elf.h b/arch/loongarch/include/asm/elf.h
index b9a4ab54285c..9b16a3b8e706 100644
--- a/arch/loongarch/include/asm/elf.h
+++ b/arch/loongarch/include/asm/elf.h
@@ -293,7 +293,7 @@ extern const char *__elf_platform;
#define ELF_PLAT_INIT(_r, load_addr) do { \
_r->regs[1] = _r->regs[2] = _r->regs[3] = _r->regs[4] = 0; \
_r->regs[5] = _r->regs[6] = _r->regs[7] = _r->regs[8] = 0; \
- _r->regs[9] = _r->regs[10] = _r->regs[11] = _r->regs[12] = 0; \
+ _r->regs[9] = _r->regs[10] /* syscall n */ = _r->regs[12] = 0; \
_r->regs[13] = _r->regs[14] = _r->regs[15] = _r->regs[16] = 0; \
_r->regs[17] = _r->regs[18] = _r->regs[19] = _r->regs[20] = 0; \
_r->regs[21] = _r->regs[22] = _r->regs[23] = _r->regs[24] = 0; \
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
index 9b4957cefa8a..46366e783c84 100644
--- a/arch/loongarch/include/asm/loongarch.h
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -1098,12 +1098,11 @@
static __always_inline u64 drdtime(void)
{
- int rID = 0;
u64 val = 0;
__asm__ __volatile__(
- "rdtime.d %0, %1 \n\t"
- : "=r"(val), "=r"(rID)
+ "rdtime.d %0, $zero\n\t"
+ : "=r"(val)
:
);
return val;
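rdtime.d produces two results: the stable counter value in its first operand and the counter ID in its second. The old code burned a general register on an ID nobody read; pointing the second operand at $zero discards it in hardware. If the ID were actually wanted, the two-output form would look like this (a sketch, not an existing kernel helper):

    static inline unsigned long rdtime_with_id(unsigned int *id)
    {
        unsigned long val;
        unsigned int rid;

        __asm__ __volatile__(
            "rdtime.d %0, %1\n\t"
            : "=r"(val), "=r"(rid));
        *id = rid;
        return val;
    }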
diff --git a/arch/loongarch/kernel/stacktrace.c b/arch/loongarch/kernel/stacktrace.c
index 92270f14db94..f623feb2129f 100644
--- a/arch/loongarch/kernel/stacktrace.c
+++ b/arch/loongarch/kernel/stacktrace.c
@@ -32,7 +32,7 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
}
for (unwind_start(&state, task, regs);
- !unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) {
+ !unwind_done(&state); unwind_next_frame(&state)) {
addr = unwind_get_return_address(&state);
if (!addr || !consume_entry(cookie, addr))
break;
diff --git a/arch/loongarch/kernel/unwind.c b/arch/loongarch/kernel/unwind.c
index ba324ba76fa1..a463d6961344 100644
--- a/arch/loongarch/kernel/unwind.c
+++ b/arch/loongarch/kernel/unwind.c
@@ -28,6 +28,5 @@ bool default_next_frame(struct unwind_state *state)
} while (!get_stack_info(state->sp, state->task, info));
- state->error = true;
return false;
}
diff --git a/arch/loongarch/kernel/unwind_prologue.c b/arch/loongarch/kernel/unwind_prologue.c
index 55afc27320e1..929ae240280a 100644
--- a/arch/loongarch/kernel/unwind_prologue.c
+++ b/arch/loongarch/kernel/unwind_prologue.c
@@ -227,7 +227,7 @@ static bool next_frame(struct unwind_state *state)
} while (!get_stack_info(state->sp, state->task, info));
out:
- state->error = true;
+ state->stack_info.type = STACK_TYPE_UNKNOWN;
return false;
}
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index 169ff8b3915e..4fcd6cd6da23 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -480,10 +480,12 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
case 8:
move_reg(ctx, t1, src);
emit_insn(ctx, extwb, dst, t1);
+ emit_zext_32(ctx, dst, is32);
break;
case 16:
move_reg(ctx, t1, src);
emit_insn(ctx, extwh, dst, t1);
+ emit_zext_32(ctx, dst, is32);
break;
case 32:
emit_insn(ctx, addw, dst, src, LOONGARCH_GPR_ZERO);
@@ -772,8 +774,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
break;
case 32:
emit_insn(ctx, revb2w, dst, dst);
- /* zero-extend 32 bits into 64 bits */
- emit_zext_32(ctx, dst, is32);
+ /* clear the upper 32 bits */
+ emit_zext_32(ctx, dst, true);
break;
case 64:
emit_insn(ctx, revbd, dst, dst);
@@ -911,8 +913,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
/* function return */
case BPF_JMP | BPF_EXIT:
- emit_sext_32(ctx, regmap[BPF_REG_0], true);
-
if (i == ctx->prog->len - 1)
break;
@@ -988,14 +988,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
}
break;
case BPF_DW:
- if (is_signed_imm12(off)) {
- emit_insn(ctx, ldd, dst, src, off);
- } else if (is_signed_imm14(off)) {
- emit_insn(ctx, ldptrd, dst, src, off);
- } else {
- move_imm(ctx, t1, off, is32);
- emit_insn(ctx, ldxd, dst, src, t1);
- }
+ move_imm(ctx, t1, off, is32);
+ emit_insn(ctx, ldxd, dst, src, t1);
break;
}
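The invariant behind the added emit_zext_32() calls: 32-bit BPF ALU results must leave bits 63..32 of the destination clear, while LoongArch ext.w.b/ext.w.h sign-extend into the full 64-bit register, and likewise a 32-bit byte swap must clear the top half unconditionally. The required semantics in plain C (illustrative, not JIT code):

    /* 32-bit MOVSX of the low byte on a 64-bit register: sign-extend
     * to 32 bits first, then make sure the upper half ends up zero. */
    static inline unsigned long long bpf_movsx8_32(unsigned long long src)
    {
        int v = (signed char)src;   /* ext.w.b: sign-extend            */
        return (unsigned int)v;     /* emit_zext_32: clear bits 63..32 */
    }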
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 76db82542519..797ae590ebdb 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -460,6 +460,7 @@ config MACH_LOONGSON2EF
config MACH_LOONGSON64
bool "Loongson 64-bit family of machines"
+ select ARCH_DMA_DEFAULT_COHERENT
select ARCH_SPARSEMEM_ENABLE
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
@@ -1251,6 +1252,7 @@ config CPU_LOONGSON64
select CPU_SUPPORTS_MSA
select CPU_DIEI_BROKEN if !LOONGSON3_ENHANCEMENT
select CPU_MIPSR2_IRQ_VI
+ select DMA_NONCOHERENT
select WEAK_ORDERING
select WEAK_REORDERING_BEYOND_LLSC
select MIPS_ASID_BITS_VARIABLE
diff --git a/arch/mips/include/asm/mach-loongson64/boot_param.h b/arch/mips/include/asm/mach-loongson64/boot_param.h
index 035b1a69e2d0..e007edd6b60a 100644
--- a/arch/mips/include/asm/mach-loongson64/boot_param.h
+++ b/arch/mips/include/asm/mach-loongson64/boot_param.h
@@ -14,7 +14,11 @@
#define ADAPTER_ROM 8
#define ACPI_TABLE 9
#define SMBIOS_TABLE 10
-#define MAX_MEMORY_TYPE 11
+#define UMA_VIDEO_RAM 11
+#define VUMA_VIDEO_RAM 12
+#define MAX_MEMORY_TYPE 13
+
+#define MEM_SIZE_IS_IN_BYTES (1 << 31)
#define LOONGSON3_BOOT_MEM_MAP_MAX 128
struct efi_memory_map_loongson {
@@ -117,7 +121,8 @@ struct irq_source_routing_table {
u64 pci_io_start_addr;
u64 pci_io_end_addr;
u64 pci_config_addr;
- u32 dma_mask_bits;
+ u16 dma_mask_bits;
+ u16 dma_noncoherent;
} __packed;
struct interface_info {
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 5387ed0a5186..b630604c577f 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -121,6 +121,19 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
/* Put the stack after the struct pt_regs. */
childksp = (unsigned long) childregs;
p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
+
+ /*
+ * New tasks lose permission to use the fpu. This accelerates context
+ * switching for most programs since they don't use the fpu.
+ */
+ clear_tsk_thread_flag(p, TIF_USEDFPU);
+ clear_tsk_thread_flag(p, TIF_USEDMSA);
+ clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ clear_tsk_thread_flag(p, TIF_FPUBOUND);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
if (unlikely(args->fn)) {
/* kernel thread */
unsigned long status = p->thread.cp0_status;
@@ -149,20 +162,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
p->thread.reg29 = (unsigned long) childregs;
p->thread.reg31 = (unsigned long) ret_from_fork;
- /*
- * New tasks lose permission to use the fpu. This accelerates context
- * switching for most programs since they don't use the fpu.
- */
childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
- clear_tsk_thread_flag(p, TIF_USEDFPU);
- clear_tsk_thread_flag(p, TIF_USEDMSA);
- clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);
-
-#ifdef CONFIG_MIPS_MT_FPAFF
- clear_tsk_thread_flag(p, TIF_FPUBOUND);
-#endif /* CONFIG_MIPS_MT_FPAFF */
-
#ifdef CONFIG_MIPS_FP_SUPPORT
atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 8fbef537fb88..82e2e051b416 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -351,10 +351,11 @@ early_initcall(mips_smp_ipi_init);
*/
asmlinkage void start_secondary(void)
{
- unsigned int cpu;
+ unsigned int cpu = raw_smp_processor_id();
cpu_probe();
per_cpu_trap_init(false);
+ rcutree_report_cpu_starting(cpu);
mips_clockevent_init();
mp_ops->init_secondary();
cpu_report();
@@ -366,7 +367,6 @@ asmlinkage void start_secondary(void)
*/
calibrate_delay();
- cpu = smp_processor_id();
cpu_data[cpu].udelay_val = loops_per_jiffy;
set_cpu_sibling_map(cpu);
diff --git a/arch/mips/loongson64/env.c b/arch/mips/loongson64/env.c
index c961e2999f15..ef3750a6ffac 100644
--- a/arch/mips/loongson64/env.c
+++ b/arch/mips/loongson64/env.c
@@ -13,6 +13,8 @@
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, [email protected]
*/
+
+#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/pci_ids.h>
#include <asm/bootinfo.h>
@@ -147,8 +149,14 @@ void __init prom_lefi_init_env(void)
loongson_sysconf.dma_mask_bits = eirq_source->dma_mask_bits;
if (loongson_sysconf.dma_mask_bits < 32 ||
- loongson_sysconf.dma_mask_bits > 64)
+ loongson_sysconf.dma_mask_bits > 64) {
loongson_sysconf.dma_mask_bits = 32;
+ dma_default_coherent = true;
+ } else {
+ dma_default_coherent = !eirq_source->dma_noncoherent;
+ }
+
+ pr_info("Firmware: Coherent DMA: %s\n", dma_default_coherent ? "on" : "off");
loongson_sysconf.restart_addr = boot_p->reset_system.ResetWarm;
loongson_sysconf.poweroff_addr = boot_p->reset_system.Shutdown;
diff --git a/arch/mips/loongson64/init.c b/arch/mips/loongson64/init.c
index ee8de1735b7c..f25caa6aa9d3 100644
--- a/arch/mips/loongson64/init.c
+++ b/arch/mips/loongson64/init.c
@@ -49,8 +49,7 @@ void virtual_early_config(void)
void __init szmem(unsigned int node)
{
u32 i, mem_type;
- static unsigned long num_physpages;
- u64 node_id, node_psize, start_pfn, end_pfn, mem_start, mem_size;
+ phys_addr_t node_id, mem_start, mem_size;
/* Otherwise come from DTB */
if (loongson_sysconf.fw_interface != LOONGSON_LEFI)
@@ -64,30 +63,46 @@ void __init szmem(unsigned int node)
mem_type = loongson_memmap->map[i].mem_type;
mem_size = loongson_memmap->map[i].mem_size;
- mem_start = loongson_memmap->map[i].mem_start;
+
+ /* Memory size comes in MB if MEM_SIZE_IS_IN_BYTES not set */
+ if (mem_size & MEM_SIZE_IS_IN_BYTES)
+ mem_size &= ~MEM_SIZE_IS_IN_BYTES;
+ else
+ mem_size = mem_size << 20;
+
+ mem_start = (node_id << 44) | loongson_memmap->map[i].mem_start;
switch (mem_type) {
case SYSTEM_RAM_LOW:
case SYSTEM_RAM_HIGH:
- start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT;
- node_psize = (mem_size << 20) >> PAGE_SHIFT;
- end_pfn = start_pfn + node_psize;
- num_physpages += node_psize;
- pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
- (u32)node_id, mem_type, mem_start, mem_size);
- pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
- start_pfn, end_pfn, num_physpages);
- memblock_add_node(PFN_PHYS(start_pfn),
- PFN_PHYS(node_psize), node,
+ case UMA_VIDEO_RAM:
+ pr_info("Node %d, mem_type:%d\t[%pa], %pa bytes usable\n",
+ (u32)node_id, mem_type, &mem_start, &mem_size);
+ memblock_add_node(mem_start, mem_size, node,
MEMBLOCK_NONE);
break;
case SYSTEM_RAM_RESERVED:
- pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
- (u32)node_id, mem_type, mem_start, mem_size);
- memblock_reserve(((node_id << 44) + mem_start), mem_size << 20);
+ case VIDEO_ROM:
+ case ADAPTER_ROM:
+ case ACPI_TABLE:
+ case SMBIOS_TABLE:
+ pr_info("Node %d, mem_type:%d\t[%pa], %pa bytes reserved\n",
+ (u32)node_id, mem_type, &mem_start, &mem_size);
+ memblock_reserve(mem_start, mem_size);
+ break;
+ /* We should not reserve VUMA_VIDEO_RAM as it overlaps with MMIO */
+ case VUMA_VIDEO_RAM:
+ default:
+ pr_info("Node %d, mem_type:%d\t[%pa], %pa bytes unhandled\n",
+ (u32)node_id, mem_type, &mem_start, &mem_size);
break;
}
}
+
+ /* Reserve vgabios if it comes from firmware */
+ if (loongson_sysconf.vgabios_addr)
+ memblock_reserve(virt_to_phys((void *)loongson_sysconf.vgabios_addr),
+ SZ_256K);
}
#ifndef CONFIG_NUMA
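The rewritten szmem() above has to cope with a dual encoding: legacy firmware reports mem_size in MiB, newer firmware sets bit 31 (MEM_SIZE_IS_IN_BYTES) and reports a raw byte count. The same decode, as a standalone snippet runnable outside the kernel:

    #include <stdint.h>

    #define MEM_SIZE_IS_IN_BYTES (UINT64_C(1) << 31)

    static uint64_t decode_mem_size(uint64_t raw)
    {
        if (raw & MEM_SIZE_IS_IN_BYTES)
            return raw & ~MEM_SIZE_IS_IN_BYTES;  /* already bytes */
        return raw << 20;                        /* legacy: MiB   */
    }

For example, raw = 0x100 decodes to 256 MiB, while raw = (1 << 31) | 0x10000000 decodes to the same 256 MiB expressed directly in bytes.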
diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
index 1641ff9a8b83..833555f74ffa 100644
--- a/arch/parisc/include/asm/bug.h
+++ b/arch/parisc/include/asm/bug.h
@@ -71,7 +71,7 @@
asm volatile("\n" \
"1:\t" PARISC_BUG_BREAK_ASM "\n" \
"\t.pushsection __bug_table,\"a\"\n" \
- "\t.align %2\n" \
+ "\t.align 4\n" \
"2:\t" __BUG_REL(1b) "\n" \
"\t.short %0\n" \
"\t.blockz %1-4-2\n" \
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 6a9acfb690c9..2f8f3f93cbb6 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -23,6 +23,15 @@
#include <asm/feature-fixups.h>
#ifdef CONFIG_VSX
+#define __REST_1FPVSR(n,c,base) \
+BEGIN_FTR_SECTION \
+ b 2f; \
+END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
+ REST_FPR(n,base); \
+ b 3f; \
+2: REST_VSR(n,c,base); \
+3:
+
#define __REST_32FPVSRS(n,c,base) \
BEGIN_FTR_SECTION \
b 2f; \
@@ -41,9 +50,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
2: SAVE_32VSRS(n,c,base); \
3:
#else
+#define __REST_1FPVSR(n,b,base) REST_FPR(n, base)
#define __REST_32FPVSRS(n,b,base) REST_32FPRS(n, base)
#define __SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base)
#endif
+#define REST_1FPVSR(n,c,base) __REST_1FPVSR(n,__REG_##c,__REG_##base)
#define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
#define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)
@@ -67,6 +78,7 @@ _GLOBAL(store_fp_state)
SAVE_32FPVSRS(0, R4, R3)
mffs fr0
stfd fr0,FPSTATE_FPSCR(r3)
+ REST_1FPVSR(0, R4, R3)
blr
EXPORT_SYMBOL(store_fp_state)
@@ -138,4 +150,5 @@ _GLOBAL(save_fpu)
2: SAVE_32FPVSRS(0, R4, R6)
mffs fr0
stfd fr0,FPSTATE_FPSCR(r6)
+ REST_1FPVSR(0, R4, R6)
blr
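What the REST_1FPVSR additions fix: mffs reads the FPSCR through fr0, clobbering it after fr0..fr31/vs0..vs31 have already been dumped to the save area. That was harmless when every caller was about to switch contexts, but a save path whose thread keeps running on its FP state (see the kvmppc_save_user_regs() change below) would resume with a corrupted register 0, so the macro reloads that one register afterwards. The order of operations, as a C-level sketch with hypothetical helpers:

    void save_all_fprs(struct thread_fp_state *fp);     /* hypothetical */
    u64 read_fpscr_via_fr0(void);                       /* hypothetical */
    void reload_fpr0(const struct thread_fp_state *fp); /* hypothetical */

    static void save_fp_sketch(struct thread_fp_state *fp)
    {
        save_all_fprs(fp);                /* 1. dump fr0..fr31          */
        fp->fpscr = read_fpscr_via_fr0(); /* 2. mffs: clobbers live fr0 */
        reload_fpr0(fp);                  /* 3. undo the fr0 clobber    */
    }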
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 392404688cec..9452a54d356c 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1198,11 +1198,11 @@ void kvmppc_save_user_regs(void)
usermsr = current->thread.regs->msr;
+ /* Caller has enabled FP/VEC/VSX/TM in MSR */
if (usermsr & MSR_FP)
- save_fpu(current);
-
+ __giveup_fpu(current);
if (usermsr & MSR_VEC)
- save_altivec(current);
+ __giveup_altivec(current);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (usermsr & MSR_TM) {
diff --git a/arch/powerpc/kernel/trace/ftrace_entry.S b/arch/powerpc/kernel/trace/ftrace_entry.S
index 90701885762c..40677416d7b2 100644
--- a/arch/powerpc/kernel/trace/ftrace_entry.S
+++ b/arch/powerpc/kernel/trace/ftrace_entry.S
@@ -62,7 +62,7 @@
.endif
/* Save previous stack pointer (r1) */
- addi r8, r1, SWITCH_FRAME_SIZE
+ addi r8, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
PPC_STL r8, GPR1(r1)
.if \allregs == 1
@@ -182,7 +182,7 @@ ftrace_no_trace:
mflr r3
mtctr r3
REST_GPR(3, r1)
- addi r1, r1, SWITCH_FRAME_SIZE
+ addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
mtlr r0
bctr
#endif
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 4094e4c4c77a..80b3f6e476b6 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -33,6 +33,7 @@ _GLOBAL(store_vr_state)
mfvscr v0
li r4, VRSTATE_VSCR
stvx v0, r4, r3
+ lvx v0, 0, r3
blr
EXPORT_SYMBOL(store_vr_state)
@@ -109,6 +110,7 @@ _GLOBAL(save_altivec)
mfvscr v0
li r4,VRSTATE_VSCR
stvx v0,r4,r7
+ lvx v0,0,r7
blr
#ifdef CONFIG_VSX
diff --git a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts
index 90b261114763..dce96f27cc89 100644
--- a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts
+++ b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts
@@ -8,9 +8,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/leds/common.h>
-/* Clock frequency (in Hz) of the rtcclk */
-#define RTCCLK_FREQ 1000000
-
/ {
model = "Microchip PolarFire-SoC Icicle Kit";
compatible = "microchip,mpfs-icicle-reference-rtlv2210", "microchip,mpfs-icicle-kit",
@@ -29,10 +26,6 @@
stdout-path = "serial1:115200n8";
};
- cpus {
- timebase-frequency = <RTCCLK_FREQ>;
- };
-
leds {
compatible = "gpio-leds";
diff --git a/arch/riscv/boot/dts/microchip/mpfs-m100pfsevp.dts b/arch/riscv/boot/dts/microchip/mpfs-m100pfsevp.dts
index 184cb36a175e..a8d623ee9fa4 100644
--- a/arch/riscv/boot/dts/microchip/mpfs-m100pfsevp.dts
+++ b/arch/riscv/boot/dts/microchip/mpfs-m100pfsevp.dts
@@ -10,9 +10,6 @@
#include "mpfs.dtsi"
#include "mpfs-m100pfs-fabric.dtsi"
-/* Clock frequency (in Hz) of the rtcclk */
-#define MTIMER_FREQ 1000000
-
/ {
model = "Aries Embedded M100PFEVPS";
compatible = "aries,m100pfsevp", "microchip,mpfs";
@@ -33,10 +30,6 @@
stdout-path = "serial1:115200n8";
};
- cpus {
- timebase-frequency = <MTIMER_FREQ>;
- };
-
ddrc_cache_lo: memory@80000000 {
device_type = "memory";
reg = <0x0 0x80000000 0x0 0x40000000>;
diff --git a/arch/riscv/boot/dts/microchip/mpfs-polarberry.dts b/arch/riscv/boot/dts/microchip/mpfs-polarberry.dts
index c87cc2d8fe29..ea0808ab1042 100644
--- a/arch/riscv/boot/dts/microchip/mpfs-polarberry.dts
+++ b/arch/riscv/boot/dts/microchip/mpfs-polarberry.dts
@@ -6,9 +6,6 @@
#include "mpfs.dtsi"
#include "mpfs-polarberry-fabric.dtsi"
-/* Clock frequency (in Hz) of the rtcclk */
-#define MTIMER_FREQ 1000000
-
/ {
model = "Sundance PolarBerry";
compatible = "sundance,polarberry", "microchip,mpfs";
@@ -22,10 +19,6 @@
stdout-path = "serial0:115200n8";
};
- cpus {
- timebase-frequency = <MTIMER_FREQ>;
- };
-
ddrc_cache_lo: memory@80000000 {
device_type = "memory";
reg = <0x0 0x80000000 0x0 0x2e000000>;
diff --git a/arch/riscv/boot/dts/microchip/mpfs-sev-kit.dts b/arch/riscv/boot/dts/microchip/mpfs-sev-kit.dts
index 013cb666c72d..f9a890579438 100644
--- a/arch/riscv/boot/dts/microchip/mpfs-sev-kit.dts
+++ b/arch/riscv/boot/dts/microchip/mpfs-sev-kit.dts
@@ -6,9 +6,6 @@
#include "mpfs.dtsi"
#include "mpfs-sev-kit-fabric.dtsi"
-/* Clock frequency (in Hz) of the rtcclk */
-#define MTIMER_FREQ 1000000
-
/ {
#address-cells = <2>;
#size-cells = <2>;
@@ -28,10 +25,6 @@
stdout-path = "serial1:115200n8";
};
- cpus {
- timebase-frequency = <MTIMER_FREQ>;
- };
-
reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
diff --git a/arch/riscv/boot/dts/microchip/mpfs-tysom-m.dts b/arch/riscv/boot/dts/microchip/mpfs-tysom-m.dts
index e0797c7e1b35..d1120f5f2c01 100644
--- a/arch/riscv/boot/dts/microchip/mpfs-tysom-m.dts
+++ b/arch/riscv/boot/dts/microchip/mpfs-tysom-m.dts
@@ -11,9 +11,6 @@
#include "mpfs.dtsi"
#include "mpfs-tysom-m-fabric.dtsi"
-/* Clock frequency (in Hz) of the rtcclk */
-#define MTIMER_FREQ 1000000
-
/ {
model = "Aldec TySOM-M-MPFS250T-REV2";
compatible = "aldec,tysom-m-mpfs250t-rev2", "microchip,mpfs";
@@ -34,10 +31,6 @@
stdout-path = "serial1:115200n8";
};
- cpus {
- timebase-frequency = <MTIMER_FREQ>;
- };
-
ddrc_cache_lo: memory@80000000 {
device_type = "memory";
reg = <0x0 0x80000000 0x0 0x30000000>;
diff --git a/arch/riscv/boot/dts/microchip/mpfs.dtsi b/arch/riscv/boot/dts/microchip/mpfs.dtsi
index a6faf24f1dba..266489d43912 100644
--- a/arch/riscv/boot/dts/microchip/mpfs.dtsi
+++ b/arch/riscv/boot/dts/microchip/mpfs.dtsi
@@ -13,6 +13,7 @@
cpus {
#address-cells = <1>;
#size-cells = <0>;
+ timebase-frequency = <1000000>;
cpu0: cpu@0 {
compatible = "sifive,e51", "sifive,rocket0", "riscv";
diff --git a/arch/riscv/boot/dts/sophgo/cv1800b.dtsi b/arch/riscv/boot/dts/sophgo/cv1800b.dtsi
index df40e87ee063..aec6401a467b 100644
--- a/arch/riscv/boot/dts/sophgo/cv1800b.dtsi
+++ b/arch/riscv/boot/dts/sophgo/cv1800b.dtsi
@@ -34,7 +34,6 @@
cpu0_intc: interrupt-controller {
compatible = "riscv,cpu-intc";
interrupt-controller;
- #address-cells = <0>;
#interrupt-cells = <1>;
};
};
diff --git a/arch/riscv/errata/andes/errata.c b/arch/riscv/errata/andes/errata.c
index 197db68cc8da..17a904869724 100644
--- a/arch/riscv/errata/andes/errata.c
+++ b/arch/riscv/errata/andes/errata.c
@@ -38,29 +38,35 @@ static long ax45mp_iocp_sw_workaround(void)
return ret.error ? 0 : ret.value;
}
-static bool errata_probe_iocp(unsigned int stage, unsigned long arch_id, unsigned long impid)
+static void errata_probe_iocp(unsigned int stage, unsigned long arch_id, unsigned long impid)
{
+ static bool done;
+
if (!IS_ENABLED(CONFIG_ERRATA_ANDES_CMO))
- return false;
+ return;
+
+ if (done)
+ return;
+
+ done = true;
if (arch_id != ANDESTECH_AX45MP_MARCHID || impid != ANDESTECH_AX45MP_MIMPID)
- return false;
+ return;
if (!ax45mp_iocp_sw_workaround())
- return false;
+ return;
/* Set this just to make core cbo code happy */
riscv_cbom_block_size = 1;
riscv_noncoherent_supported();
-
- return true;
}
void __init_or_module andes_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid, unsigned long impid,
unsigned int stage)
{
- errata_probe_iocp(stage, archid, impid);
+ if (stage == RISCV_ALTERNATIVES_BOOT)
+ errata_probe_iocp(stage, archid, impid);
/* we have nothing to patch here ATM so just return back */
}
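The hunk above turns errata_probe_iocp() into a run-once probe: a static flag short-circuits repeat calls, and the caller now restricts it to the boot alternatives pass. A minimal userspace sketch of the same run-once idiom (the names are mine, and like the boot-time call site it assumes a single caller, so there is no locking):

#include <stdbool.h>
#include <stdio.h>

static void probe_once(void)
{
	static bool done;

	if (done)
		return;
	done = true;

	puts("probing (expensive, side-effecting work)");
}

int main(void)
{
	probe_once();	/* does the work */
	probe_once();	/* returns immediately */
	return 0;
}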
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index b77397432403..76ace1e0b46f 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -154,7 +154,6 @@ secondary_start_sbi:
XIP_FIXUP_OFFSET a3
add a3, a3, a1
REG_L sp, (a3)
- scs_load_current
.Lsecondary_start_common:
@@ -165,6 +164,7 @@ secondary_start_sbi:
call relocate_enable_mmu
#endif
call .Lsetup_trap_vector
+ scs_load_current
tail smp_callin
#endif /* CONFIG_SMP */
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 56a8c78e9e21..aac019ed63b1 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -40,15 +40,6 @@ struct relocation_handlers {
long buffer);
};
-unsigned int initialize_relocation_hashtable(unsigned int num_relocations);
-void process_accumulated_relocations(struct module *me);
-int add_relocation_to_accumulate(struct module *me, int type, void *location,
- unsigned int hashtable_bits, Elf_Addr v);
-
-struct hlist_head *relocation_hashtable;
-
-struct list_head used_buckets_list;
-
/*
* The auipc+jalr instruction pair can reach any PC-relative offset
* in the range [-2^31 - 2^11, 2^31 - 2^11)
@@ -64,7 +55,7 @@ static bool riscv_insn_valid_32bit_offset(ptrdiff_t val)
static int riscv_insn_rmw(void *location, u32 keep, u32 set)
{
- u16 *parcel = location;
+ __le16 *parcel = location;
u32 insn = (u32)le16_to_cpu(parcel[0]) | (u32)le16_to_cpu(parcel[1]) << 16;
insn &= keep;
@@ -77,7 +68,7 @@ static int riscv_insn_rmw(void *location, u32 keep, u32 set)
static int riscv_insn_rvc_rmw(void *location, u16 keep, u16 set)
{
- u16 *parcel = location;
+ __le16 *parcel = location;
u16 insn = le16_to_cpu(*parcel);
insn &= keep;
@@ -604,7 +595,10 @@ static const struct relocation_handlers reloc_handlers[] = {
/* 192-255 nonstandard ABI extensions */
};
-void process_accumulated_relocations(struct module *me)
+static void
+process_accumulated_relocations(struct module *me,
+ struct hlist_head **relocation_hashtable,
+ struct list_head *used_buckets_list)
{
/*
* Only ADD/SUB/SET/ULEB128 should end up here.
@@ -624,18 +618,25 @@ void process_accumulated_relocations(struct module *me)
* - Each relocation entry for a location address
*/
struct used_bucket *bucket_iter;
+ struct used_bucket *bucket_iter_tmp;
struct relocation_head *rel_head_iter;
+ struct hlist_node *rel_head_iter_tmp;
struct relocation_entry *rel_entry_iter;
+ struct relocation_entry *rel_entry_iter_tmp;
int curr_type;
void *location;
long buffer;
- list_for_each_entry(bucket_iter, &used_buckets_list, head) {
- hlist_for_each_entry(rel_head_iter, bucket_iter->bucket, node) {
+ list_for_each_entry_safe(bucket_iter, bucket_iter_tmp,
+ used_buckets_list, head) {
+ hlist_for_each_entry_safe(rel_head_iter, rel_head_iter_tmp,
+ bucket_iter->bucket, node) {
buffer = 0;
location = rel_head_iter->location;
- list_for_each_entry(rel_entry_iter,
- rel_head_iter->rel_entry, head) {
+ list_for_each_entry_safe(rel_entry_iter,
+ rel_entry_iter_tmp,
+ rel_head_iter->rel_entry,
+ head) {
curr_type = rel_entry_iter->type;
reloc_handlers[curr_type].reloc_handler(
me, &buffer, rel_entry_iter->value);
@@ -648,11 +649,14 @@ void process_accumulated_relocations(struct module *me)
kfree(bucket_iter);
}
- kfree(relocation_hashtable);
+ kfree(*relocation_hashtable);
}
-int add_relocation_to_accumulate(struct module *me, int type, void *location,
- unsigned int hashtable_bits, Elf_Addr v)
+static int add_relocation_to_accumulate(struct module *me, int type,
+ void *location,
+ unsigned int hashtable_bits, Elf_Addr v,
+ struct hlist_head *relocation_hashtable,
+ struct list_head *used_buckets_list)
{
struct relocation_entry *entry;
struct relocation_head *rel_head;
@@ -661,6 +665,10 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location,
unsigned long hash;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+
+ if (!entry)
+ return -ENOMEM;
+
INIT_LIST_HEAD(&entry->head);
entry->type = type;
entry->value = v;
@@ -669,7 +677,10 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location,
current_head = &relocation_hashtable[hash];
- /* Find matching location (if any) */
+ /*
+ * Search for the relocation_head for the relocations that happen at the
+ * provided location
+ */
bool found = false;
struct relocation_head *rel_head_iter;
@@ -681,19 +692,45 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location,
}
}
+ /*
+ * If there have not yet been any relocations at the provided location,
+ * create a relocation_head for that location and populate it with this
+ * relocation_entry.
+ */
if (!found) {
rel_head = kmalloc(sizeof(*rel_head), GFP_KERNEL);
+
+ if (!rel_head) {
+ kfree(entry);
+ return -ENOMEM;
+ }
+
rel_head->rel_entry =
kmalloc(sizeof(struct list_head), GFP_KERNEL);
+
+ if (!rel_head->rel_entry) {
+ kfree(entry);
+ kfree(rel_head);
+ return -ENOMEM;
+ }
+
INIT_LIST_HEAD(rel_head->rel_entry);
rel_head->location = location;
INIT_HLIST_NODE(&rel_head->node);
if (!current_head->first) {
bucket =
kmalloc(sizeof(struct used_bucket), GFP_KERNEL);
+
+ if (!bucket) {
+ kfree(entry);
+ kfree(rel_head->rel_entry);
+ kfree(rel_head);
+ return -ENOMEM;
+ }
+
INIT_LIST_HEAD(&bucket->head);
bucket->bucket = current_head;
- list_add(&bucket->head, &used_buckets_list);
+ list_add(&bucket->head, used_buckets_list);
}
hlist_add_head(&rel_head->node, current_head);
}
@@ -704,7 +741,9 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location,
return 0;
}
-unsigned int initialize_relocation_hashtable(unsigned int num_relocations)
+static int
+initialize_relocation_hashtable(unsigned int num_relocations,
+ struct hlist_head **relocation_hashtable)
{
/* Can safely assume that bits is not greater than sizeof(long) */
unsigned long hashtable_size = roundup_pow_of_two(num_relocations);
@@ -720,12 +759,13 @@ unsigned int initialize_relocation_hashtable(unsigned int num_relocations)
hashtable_size <<= should_double_size;
- relocation_hashtable = kmalloc_array(hashtable_size,
- sizeof(*relocation_hashtable),
- GFP_KERNEL);
- __hash_init(relocation_hashtable, hashtable_size);
+ *relocation_hashtable = kmalloc_array(hashtable_size,
+ sizeof(*relocation_hashtable),
+ GFP_KERNEL);
+ if (!*relocation_hashtable)
+ return -ENOMEM;
- INIT_LIST_HEAD(&used_buckets_list);
+ __hash_init(*relocation_hashtable, hashtable_size);
return hashtable_bits;
}
@@ -742,7 +782,17 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
Elf_Addr v;
int res;
unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel);
- unsigned int hashtable_bits = initialize_relocation_hashtable(num_relocations);
+ struct hlist_head *relocation_hashtable;
+ struct list_head used_buckets_list;
+ int hashtable_bits;
+
+ hashtable_bits = initialize_relocation_hashtable(num_relocations,
+ &relocation_hashtable);
+
+ if (hashtable_bits < 0)
+ return hashtable_bits;
+
+ INIT_LIST_HEAD(&used_buckets_list);
pr_debug("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
@@ -823,14 +873,18 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
}
if (reloc_handlers[type].accumulate_handler)
- res = add_relocation_to_accumulate(me, type, location, hashtable_bits, v);
+ res = add_relocation_to_accumulate(me, type, location,
+ hashtable_bits, v,
+ relocation_hashtable,
+ &used_buckets_list);
else
res = handler(me, location, v);
if (res)
return res;
}
- process_accumulated_relocations(me);
+ process_accumulated_relocations(me, &relocation_hashtable,
+ &used_buckets_list);
return 0;
}
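Two fixes are folded into the module.c hunk: every kmalloc() is now checked and unwound, and the teardown loops switch to the _safe iterator variants because their bodies kfree() the nodes being visited. The _safe iterators simply cache the successor before running the body; the same pattern on a plain singly linked list (a standalone sketch with hypothetical types, not the kernel list API):

#include <stdlib.h>

struct node {
	struct node *next;
};

/* Cache 'next' before freeing the current node, which is what the
 * list_for_each_entry_safe() iterators above do for kernel lists. */
static void free_all(struct node *head)
{
	while (head) {
		struct node *next = head->next;	/* grab successor first */

		free(head);			/* now freeing is safe */
		head = next;
	}
}

int main(void)
{
	struct node *a = calloc(1, sizeof(*a));
	struct node *b = calloc(1, sizeof(*b));

	if (!a || !b) {
		free(a);
		free(b);
		return 1;
	}
	a->next = b;
	free_all(a);
	return 0;
}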
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index c712037dbe10..a2ca5b7756a5 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -169,7 +169,7 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
pair->value &= ~missing;
}
-static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext)
+static bool hwprobe_ext0_has(const struct cpumask *cpus, u64 ext)
{
struct riscv_hwprobe pair;
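hwprobe_ext0_has() now takes the extension bit as u64 because unsigned long is 32 bits on rv32, so any extension bit above bit 31 was silently truncated to zero. The failure mode, demonstrated standalone:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ext = 1ULL << 40;	 /* an extension bit above bit 31 */
	uint32_t narrow = (uint32_t)ext; /* what a 32-bit unsigned long keeps */

	/* prints "wide=0x10000000000 narrow=0" */
	printf("wide=%#llx narrow=%#x\n", (unsigned long long)ext, narrow);
	return 0;
}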
diff --git a/arch/riscv/kernel/tests/module_test/test_uleb128.S b/arch/riscv/kernel/tests/module_test/test_uleb128.S
index 90f22049d553..8515ed7cd8c1 100644
--- a/arch/riscv/kernel/tests/module_test/test_uleb128.S
+++ b/arch/riscv/kernel/tests/module_test/test_uleb128.S
@@ -6,13 +6,13 @@
.text
.global test_uleb_basic
test_uleb_basic:
- ld a0, second
+ lw a0, second
addi a0, a0, -127
ret
.global test_uleb_large
test_uleb_large:
- ld a0, fourth
+ lw a0, fourth
addi a0, a0, -0x07e8
ret
@@ -22,10 +22,10 @@ first:
second:
.reloc second, R_RISCV_SET_ULEB128, second
.reloc second, R_RISCV_SUB_ULEB128, first
- .dword 0
+ .word 0
third:
.space 1000
fourth:
.reloc fourth, R_RISCV_SET_ULEB128, fourth
.reloc fourth, R_RISCV_SUB_ULEB128, third
- .dword 0
+ .word 0
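Switching the test payloads from .dword to .word keeps them loadable with lw on both rv32 and rv64; the stored values are still produced by the SET/SUB_ULEB128 relocation pair. For reference, the ULEB128 encoding those relocations patch is trivial to decode (a sketch, not kernel code):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* ULEB128: 7 payload bits per byte, least significant group first,
 * continuation bit (0x80) set on every byte except the last. */
static uint64_t uleb128_decode(const uint8_t *p, size_t *consumed)
{
	uint64_t val = 0;
	unsigned int shift = 0;
	size_t i = 0;
	uint8_t byte;

	do {
		byte = p[i++];
		val |= (uint64_t)(byte & 0x7f) << shift;
		shift += 7;
	} while (byte & 0x80);

	if (consumed)
		*consumed = i;
	return val;
}

int main(void)
{
	const uint8_t enc[] = { 0xe8, 0x07 };	/* encodes 1000 */

	printf("%llu\n", (unsigned long long)uleb128_decode(enc, NULL));
	return 0;
}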
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 5eba37147caa..5255f8134aef 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -550,16 +550,14 @@ int handle_misaligned_store(struct pt_regs *regs)
} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
len = 8;
val.data_ulong = GET_RS2S(insn, regs);
- } else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
- ((insn >> SH_RD) & 0x1f)) {
+ } else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP) {
len = 8;
val.data_ulong = GET_RS2C(insn, regs);
#endif
} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
len = 4;
val.data_ulong = GET_RS2S(insn, regs);
- } else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
- ((insn >> SH_RD) & 0x1f)) {
+ } else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP) {
len = 4;
val.data_ulong = GET_RS2C(insn, regs);
} else if ((insn & INSN_MASK_C_FSD) == INSN_MATCH_C_FSD) {
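The dropped `(insn >> SH_RD) & 0x1f` test is correct for C.LWSP/C.LDSP, where bits 11:7 hold rd and rd=0 is reserved, but C.SWSP/C.SDSP are CSS-format stores: bits 12:7 carry the immediate and rs2 sits in bits 6:2, so the old filter rejected valid stores whose immediate bits happened to be zero. A quick demonstration of the misread field (encoding per the RVC spec; standalone C):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SH_RD 7	/* bit position the removed check looked at */

int main(void)
{
	/*
	 * c.swsp x8, 0(sp), CSS format: funct3=0b110 (bits 15:13),
	 * uimm=0 (bits 12:7), rs2=8 (bits 6:2), op=0b10 -> 0xc022.
	 */
	uint16_t insn = 0xc022;

	/* Bits 11:7 are immediate bits here, not a register number. */
	assert(((insn >> SH_RD) & 0x1f) == 0);
	puts("valid c.swsp that the old rd!=0 filter would have skipped");
	return 0;
}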
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 02dcbe82a8e5..8207a892bbe2 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -587,10 +587,6 @@ void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
if (!gmap_is_shadow(gmap))
return;
- if (start >= 1UL << 31)
- /* We are only interested in prefix pages */
- return;
-
/*
* Only new shadow blocks are added to the list during runtime,
* therefore we can safely reference them all the time.
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 3bd2ab2a9a34..5cb92941540b 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -756,7 +756,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
pte_clear(mm, addr, ptep);
}
if (reset)
- pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
+ pgste_val(pgste) &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
pgste_set_unlock(ptep, pgste);
preempt_enable();
}
diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
index 1b5d17a9f70d..cf1f13c82175 100644
--- a/arch/x86/coco/tdx/tdx.c
+++ b/arch/x86/coco/tdx/tdx.c
@@ -10,6 +10,7 @@
#include <asm/coco.h>
#include <asm/tdx.h>
#include <asm/vmx.h>
+#include <asm/ia32.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/pgtable.h>
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index d813160b14d8..6356060caaf3 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -26,6 +26,7 @@
#include <xen/events.h>
#endif
+#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
@@ -167,7 +168,96 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
}
}
-/* Handles int $0x80 */
+#ifdef CONFIG_IA32_EMULATION
+static __always_inline bool int80_is_external(void)
+{
+ const unsigned int offs = (0x80 / 32) * 0x10;
+ const u32 bit = BIT(0x80 % 32);
+
+ /* The local APIC on XENPV guests is fake */
+ if (cpu_feature_enabled(X86_FEATURE_XENPV))
+ return false;
+
+ /*
+ * If vector 0x80 is set in the APIC ISR then this is an external
+ * interrupt. Either from broken hardware or injected by a VMM.
+ *
+ * Note: In guest mode this is only valid for secure guests where
+ * the secure module fully controls the vAPIC exposed to the guest.
+ */
+ return apic_read(APIC_ISR + offs) & bit;
+}
+
+/**
+ * int80_emulation - 32-bit legacy syscall entry
+ *
+ * This entry point can be used by 32-bit and 64-bit programs to perform
+ * 32-bit system calls. Instances of INT $0x80 can be found inline in
+ * various programs and libraries. It is also used by the vDSO's
+ * __kernel_vsyscall fallback for hardware that doesn't support a faster
+ * entry method. Restarted 32-bit system calls also fall back to INT
+ * $0x80 regardless of what instruction was originally used to do the
+ * system call.
+ *
+ * This is considered a slow path. It is not used by most libc
+ * implementations on modern hardware except during process startup.
+ *
+ * The arguments for the INT $0x80 based syscall are on stack in the
+ * pt_regs structure:
+ * eax: system call number
+ * ebx, ecx, edx, esi, edi, ebp: arg1 - arg6
+ */
+DEFINE_IDTENTRY_RAW(int80_emulation)
+{
+ int nr;
+
+ /* Kernel does not use INT $0x80! */
+ if (unlikely(!user_mode(regs))) {
+ irqentry_enter(regs);
+ instrumentation_begin();
+ panic("Unexpected external interrupt 0x80\n");
+ }
+
+ /*
+ * Establish kernel context for instrumentation, including for
+ * int80_is_external() below which calls into the APIC driver.
+ * Identical for soft and external interrupts.
+ */
+ enter_from_user_mode(regs);
+
+ instrumentation_begin();
+ add_random_kstack_offset();
+
+ /* Validate that this is a soft interrupt to the extent possible */
+ if (unlikely(int80_is_external()))
+ panic("Unexpected external interrupt 0x80\n");
+
+ /*
+ * The low level idtentry code pushed -1 into regs::orig_ax
+ * and regs::ax contains the syscall number.
+ *
+ * User tracing code (ptrace or signal handlers) might assume
+ * that regs::orig_ax contains a 32-bit number when invoking
+ * a 32-bit syscall.
+ *
+ * Establish the syscall convention by saving the 32-bit truncated
+ * syscall number in regs::orig_ax and by invalidating regs::ax.
+ */
+ regs->orig_ax = regs->ax & GENMASK(31, 0);
+ regs->ax = -ENOSYS;
+
+ nr = syscall_32_enter(regs);
+
+ local_irq_enable();
+ nr = syscall_enter_from_user_mode_work(regs, nr);
+ do_syscall_32_irqs_on(regs, nr);
+
+ instrumentation_end();
+ syscall_exit_to_user_mode(regs);
+}
+#else /* CONFIG_IA32_EMULATION */
+
+/* Handles int $0x80 on a 32-bit kernel */
__visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
{
int nr = syscall_32_enter(regs);
@@ -186,6 +276,7 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
instrumentation_end();
syscall_exit_to_user_mode(regs);
}
+#endif /* !CONFIG_IA32_EMULATION */
static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
{
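int80_is_external() indexes the local APIC's in-service registers: the ISR is a bank of eight 32-bit registers spaced 0x10 apart in the APIC register space, so vector v lives at register v/32, bit v%32. The arithmetic for vector 0x80, checked in plain C (no APIC access; this only verifies the offset math):

#include <stdio.h>

int main(void)
{
	unsigned int vector = 0x80;
	unsigned int offs = (vector / 32) * 0x10; /* 0x10 register stride */
	unsigned int bit = vector % 32;

	/* prints "APIC_ISR + 0x40, bit 0" */
	printf("APIC_ISR + %#x, bit %u\n", offs, bit);
	return 0;
}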
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 27c05d08558a..de94e2e84ecc 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -275,80 +275,3 @@ SYM_INNER_LABEL(entry_SYSRETL_compat_end, SYM_L_GLOBAL)
ANNOTATE_NOENDBR
int3
SYM_CODE_END(entry_SYSCALL_compat)
-
-/*
- * 32-bit legacy system call entry.
- *
- * 32-bit x86 Linux system calls traditionally used the INT $0x80
- * instruction. INT $0x80 lands here.
- *
- * This entry point can be used by 32-bit and 64-bit programs to perform
- * 32-bit system calls. Instances of INT $0x80 can be found inline in
- * various programs and libraries. It is also used by the vDSO's
- * __kernel_vsyscall fallback for hardware that doesn't support a faster
- * entry method. Restarted 32-bit system calls also fall back to INT
- * $0x80 regardless of what instruction was originally used to do the
- * system call.
- *
- * This is considered a slow path. It is not used by most libc
- * implementations on modern hardware except during process startup.
- *
- * Arguments:
- * eax system call number
- * ebx arg1
- * ecx arg2
- * edx arg3
- * esi arg4
- * edi arg5
- * ebp arg6
- */
-SYM_CODE_START(entry_INT80_compat)
- UNWIND_HINT_ENTRY
- ENDBR
- /*
- * Interrupts are off on entry.
- */
- ASM_CLAC /* Do this early to minimize exposure */
- ALTERNATIVE "swapgs", "", X86_FEATURE_XENPV
-
- /*
- * User tracing code (ptrace or signal handlers) might assume that
- * the saved RAX contains a 32-bit number when we're invoking a 32-bit
- * syscall. Just in case the high bits are nonzero, zero-extend
- * the syscall number. (This could almost certainly be deleted
- * with no ill effects.)
- */
- movl %eax, %eax
-
- /* switch to thread stack expects orig_ax and rdi to be pushed */
- pushq %rax /* pt_regs->orig_ax */
-
- /* Need to switch before accessing the thread stack. */
- SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
-
- /* In the Xen PV case we already run on the thread stack. */
- ALTERNATIVE "", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
-
- movq %rsp, %rax
- movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
-
- pushq 5*8(%rax) /* regs->ss */
- pushq 4*8(%rax) /* regs->rsp */
- pushq 3*8(%rax) /* regs->eflags */
- pushq 2*8(%rax) /* regs->cs */
- pushq 1*8(%rax) /* regs->ip */
- pushq 0*8(%rax) /* regs->orig_ax */
-.Lint80_keep_stack:
-
- PUSH_AND_CLEAR_REGS rax=$-ENOSYS
- UNWIND_HINT_REGS
-
- cld
-
- IBRS_ENTER
- UNTRAIN_RET
-
- movq %rsp, %rdi
- call do_int80_syscall_32
- jmp swapgs_restore_regs_and_return_to_usermode
-SYM_CODE_END(entry_INT80_compat)
diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h
index 5a2ae24b1204..9805629479d9 100644
--- a/arch/x86/include/asm/ia32.h
+++ b/arch/x86/include/asm/ia32.h
@@ -75,6 +75,11 @@ static inline bool ia32_enabled(void)
return __ia32_enabled;
}
+static inline void ia32_disable(void)
+{
+ __ia32_enabled = false;
+}
+
#else /* !CONFIG_IA32_EMULATION */
static inline bool ia32_enabled(void)
@@ -82,6 +87,8 @@ static inline bool ia32_enabled(void)
return IS_ENABLED(CONFIG_X86_32);
}
+static inline void ia32_disable(void) {}
+
#endif
#endif /* _ASM_X86_IA32_H */
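The new !CONFIG_IA32_EMULATION stub keeps call sites such as sme_early_init() free of #ifdefs: when the feature is compiled out, ia32_disable() is an empty inline the compiler deletes. The idiom in isolation (a generic sketch; CONFIG_FEATURE is a made-up symbol):

#include <stdio.h>

/* Config-gated stub: callers never need an #ifdef of their own. */
#ifdef CONFIG_FEATURE
static inline void feature_disable(void)
{
	puts("feature disabled");
}
#else
static inline void feature_disable(void) {}	/* compiles to nothing */
#endif

int main(void)
{
	feature_disable();	/* unconditional call site */
	return 0;
}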
diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
index 05fd175cec7d..13639e57e1f8 100644
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -569,6 +569,10 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_UD, exc_invalid_op);
DECLARE_IDTENTRY_RAW(X86_TRAP_BP, exc_int3);
DECLARE_IDTENTRY_RAW_ERRORCODE(X86_TRAP_PF, exc_page_fault);
+#if defined(CONFIG_IA32_EMULATION)
+DECLARE_IDTENTRY_RAW(IA32_SYSCALL_VECTOR, int80_emulation);
+#endif
+
#ifdef CONFIG_X86_MCE
#ifdef CONFIG_X86_64
DECLARE_IDTENTRY_MCE(X86_TRAP_MC, exc_machine_check);
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
index 4d84122bd643..484f4f0131a5 100644
--- a/arch/x86/include/asm/proto.h
+++ b/arch/x86/include/asm/proto.h
@@ -32,10 +32,6 @@ void entry_SYSCALL_compat(void);
void entry_SYSCALL_compat_safe_stack(void);
void entry_SYSRETL_compat_unsafe_stack(void);
void entry_SYSRETL_compat_end(void);
-void entry_INT80_compat(void);
-#ifdef CONFIG_XEN_PV
-void xen_entry_INT80_compat(void);
-#endif
#else /* !CONFIG_IA32_EMULATION */
#define entry_SYSCALL_compat NULL
#define entry_SYSENTER_compat NULL
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index a7eab05e5f29..f322ebd053a9 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -1320,6 +1320,9 @@ static void zenbleed_check_cpu(void *unused)
void amd_check_microcode(void)
{
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ return;
+
on_each_cpu(zenbleed_check_cpu, NULL, 1);
}
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index 8857abc706e4..660b601f1d6c 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -121,7 +121,7 @@ static const __initconst struct idt_data def_idts[] = {
static const struct idt_data ia32_idt[] __initconst = {
#if defined(CONFIG_IA32_EMULATION)
- SYSG(IA32_SYSCALL_VECTOR, entry_INT80_compat),
+ SYSG(IA32_SYSCALL_VECTOR, asm_int80_emulation),
#elif defined(CONFIG_X86_32)
SYSG(IA32_SYSCALL_VECTOR, entry_INT80_32),
#endif
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 70472eebe719..c67285824e82 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -1234,10 +1234,6 @@ void setup_ghcb(void)
if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
return;
- /* First make sure the hypervisor talks a supported protocol. */
- if (!sev_es_negotiate_protocol())
- sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
-
/*
* Check whether the runtime #VC exception handler is active. It uses
* the per-CPU GHCB page which is set up by sev_es_init_vc_handling().
@@ -1255,6 +1251,13 @@ void setup_ghcb(void)
}
/*
+ * Make sure the hypervisor talks a supported protocol.
+ * This gets called only in the BSP boot phase.
+ */
+ if (!sev_es_negotiate_protocol())
+ sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
+
+ /*
* Clear the boot_ghcb. The first exception comes in before the bss
* section is cleared.
*/
diff --git a/arch/x86/kvm/debugfs.c b/arch/x86/kvm/debugfs.c
index ee8c4c3496ed..eea6ea7f14af 100644
--- a/arch/x86/kvm/debugfs.c
+++ b/arch/x86/kvm/debugfs.c
@@ -182,6 +182,7 @@ static int kvm_mmu_rmaps_stat_release(struct inode *inode, struct file *file)
}
static const struct file_operations mmu_rmaps_stat_fops = {
+ .owner = THIS_MODULE,
.open = kvm_mmu_rmaps_stat_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 712146312358..f3bb30b40876 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1855,15 +1855,17 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
bool old_paging = is_paging(vcpu);
#ifdef CONFIG_X86_64
- if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) {
+ if (vcpu->arch.efer & EFER_LME) {
if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
vcpu->arch.efer |= EFER_LMA;
- svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
+ if (!vcpu->arch.guest_state_protected)
+ svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
}
if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
vcpu->arch.efer &= ~EFER_LMA;
- svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
+ if (!vcpu->arch.guest_state_protected)
+ svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
}
}
#endif
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2c924075f6f1..1a3aaa7dafae 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5518,8 +5518,8 @@ static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
struct kvm_xsave *guest_xsave)
{
- return kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
- sizeof(guest_xsave->region));
+ kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
+ sizeof(guest_xsave->region));
}
static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
@@ -13031,7 +13031,10 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
if (vcpu->arch.guest_state_protected)
return true;
- return vcpu->arch.preempted_in_kernel;
+ if (vcpu != kvm_get_running_vcpu())
+ return vcpu->arch.preempted_in_kernel;
+
+ return static_call(kvm_x86_get_cpl)(vcpu) == 0;
}
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index a68f2dda0948..70b91de2e053 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -32,6 +32,7 @@
#include <asm/msr.h>
#include <asm/cmdline.h>
#include <asm/sev.h>
+#include <asm/ia32.h>
#include "mm_internal.h"
@@ -481,6 +482,16 @@ void __init sme_early_init(void)
*/
if (sev_status & MSR_AMD64_SEV_ES_ENABLED)
x86_cpuinit.parallel_bringup = false;
+
+ /*
+ * The VMM is capable of injecting interrupt 0x80 and triggering the
+ * compatibility syscall path.
+ *
+ * By default, the 32-bit emulation is disabled in order to ensure
+ * the safety of the VM.
+ */
+ if (sev_status & MSR_AMD64_SEV_ENABLED)
+ ia32_disable();
}
void __init mem_encrypt_free_decrypted_mem(void)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 8c10d9abc239..e89e415aa743 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -3025,3 +3025,49 @@ void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp
#endif
WARN(1, "verification of programs using bpf_throw should have failed\n");
}
+
+void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
+ struct bpf_prog *new, struct bpf_prog *old)
+{
+ u8 *old_addr, *new_addr, *old_bypass_addr;
+ int ret;
+
+ old_bypass_addr = old ? NULL : poke->bypass_addr;
+ old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
+ new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
+
+ /*
+ * On program loading or teardown, the program's kallsym entry
+ * might not be in place, so we use __bpf_arch_text_poke to skip
+ * the kallsyms check.
+ */
+ if (new) {
+ ret = __bpf_arch_text_poke(poke->tailcall_target,
+ BPF_MOD_JUMP,
+ old_addr, new_addr);
+ BUG_ON(ret < 0);
+ if (!old) {
+ ret = __bpf_arch_text_poke(poke->tailcall_bypass,
+ BPF_MOD_JUMP,
+ poke->bypass_addr,
+ NULL);
+ BUG_ON(ret < 0);
+ }
+ } else {
+ ret = __bpf_arch_text_poke(poke->tailcall_bypass,
+ BPF_MOD_JUMP,
+ old_bypass_addr,
+ poke->bypass_addr);
+ BUG_ON(ret < 0);
+ /* let other CPUs finish executing the old program so
+ * that they will not be exposed to an invalid nop,
+ * stack unwind or nop state
+ */
+ if (!ret)
+ synchronize_rcu();
+ ret = __bpf_arch_text_poke(poke->tailcall_target,
+ BPF_MOD_JUMP,
+ old_addr, NULL);
+ BUG_ON(ret < 0);
+ }
+}
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 0337392a3121..3c61bb98c10e 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -33,9 +33,12 @@ EXPORT_SYMBOL_GPL(hypercall_page);
* and xen_vcpu_setup for details. By default it points to share_info->vcpu_info
* but during boot it is switched to point to xen_vcpu_info.
* The pointer is used in xen_evtchn_do_upcall to acknowledge pending events.
+ * Make sure that xen_vcpu_info doesn't cross a page boundary by making it
+ * cache-line aligned (the struct is guaranteed to have a size of 64 bytes,
+ * which matches the cache line size of 64-bit x86 processors).
*/
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
-DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
+DEFINE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);
/* Linux <-> Xen vCPU id mapping */
DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
@@ -160,6 +163,7 @@ void xen_vcpu_setup(int cpu)
int err;
struct vcpu_info *vcpup;
+ BUILD_BUG_ON(sizeof(*vcpup) > SMP_CACHE_BYTES);
BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
/*
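The alignment change relies on a small invariant: a 64-byte object placed on a 64-byte boundary can never straddle a page, because page sizes are multiples of 64. DEFINE_PER_CPU_ALIGNED supplies the alignment and the new BUILD_BUG_ON pins the size at compile time; a userspace equivalent of that assertion:

#include <stdint.h>

/* Stand-in for struct vcpu_info, which is specified to be 64 bytes. */
struct info {
	unsigned char data[64];
} __attribute__((aligned(64)));

/* Size <= alignment means the object cannot cross a page boundary. */
_Static_assert(sizeof(struct info) <= 64, "must fit one cache line");

int main(void)
{
	return 0;
}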
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index bbbfdd495ebd..aeb33e0a3f76 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -704,7 +704,7 @@ static struct trap_array_entry trap_array[] = {
TRAP_ENTRY(exc_int3, false ),
TRAP_ENTRY(exc_overflow, false ),
#ifdef CONFIG_IA32_EMULATION
- { entry_INT80_compat, xen_entry_INT80_compat, false },
+ TRAP_ENTRY(int80_emulation, false ),
#endif
TRAP_ENTRY(exc_page_fault, false ),
TRAP_ENTRY(exc_divide_error, false ),
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 9e5e68008785..1a9cd18dfbd3 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -156,7 +156,7 @@ xen_pv_trap asm_xenpv_exc_machine_check
#endif /* CONFIG_X86_MCE */
xen_pv_trap asm_exc_simd_coprocessor_error
#ifdef CONFIG_IA32_EMULATION
-xen_pv_trap entry_INT80_compat
+xen_pv_trap asm_int80_emulation
#endif
xen_pv_trap asm_exc_xen_unknown_trap
xen_pv_trap asm_exc_xen_hypervisor_callback
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 408a2aa66c69..a87ab36889e7 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -21,7 +21,7 @@ extern void *xen_initial_gdt;
struct trap_info;
void xen_copy_trap_info(struct trap_info *traps);
-DECLARE_PER_CPU(struct vcpu_info, xen_vcpu_info);
+DECLARE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);
DECLARE_PER_CPU(unsigned long, xen_cr3);
DECLARE_PER_CPU(unsigned long, xen_current_cr3);
diff --git a/block/blk-core.c b/block/blk-core.c
index fdf25b8d6e78..2eca76ccf4ee 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -501,9 +501,17 @@ static inline void bio_check_ro(struct bio *bio)
if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
return;
- pr_warn_ratelimited("Trying to write to read-only block-device %pg\n",
- bio->bi_bdev);
- /* Older lvm-tools actually trigger this */
+
+ if (bio->bi_bdev->bd_ro_warned)
+ return;
+
+ bio->bi_bdev->bd_ro_warned = true;
+ /*
+ * Using an ioctl to set the underlying disk of a raid/dm
+ * device to read-only will trigger this.
+ */
+ pr_warn("Trying to write to read-only block-device %pg\n",
+ bio->bi_bdev);
}
}
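The rewritten bio_check_ro() trades the ratelimited printk for a per-bdev latch: warn once per block device, then stay silent. The flag is written without atomics, so two racing writers might each warn once, which is acceptable for a diagnostic. The idiom in miniature (hypothetical names):

#include <stdbool.h>
#include <stdio.h>

struct bdev {
	bool ro_warned;
};

static void warn_ro_write(struct bdev *bd, const char *name)
{
	if (bd->ro_warned)
		return;		/* already complained about this device */
	bd->ro_warned = true;
	fprintf(stderr, "Trying to write to read-only block-device %s\n", name);
}

int main(void)
{
	struct bdev sda = { 0 };

	warn_ro_write(&sda, "sda");	/* prints */
	warn_ro_write(&sda, "sda");	/* silent */
	return 0;
}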
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 900c1be1fee1..ac18f802c027 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1512,14 +1512,26 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q,
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
+static bool blk_is_flush_data_rq(struct request *rq)
+{
+ return (rq->rq_flags & RQF_FLUSH_SEQ) && !is_flush_rq(rq);
+}
+
static bool blk_mq_rq_inflight(struct request *rq, void *priv)
{
/*
* If we find a request that isn't idle we know the queue is busy
* as it's checked in the iter.
* Return false to stop the iteration.
+ *
+ * When the queue is quiesced, don't count a completed flush data
+ * request as inflight: the flush sequence is suspended and the
+ * request is invisible to the driver, just like any other request
+ * that is pending because of the quiesce
*/
- if (blk_mq_request_started(rq)) {
+ if (blk_mq_request_started(rq) && !(blk_queue_quiesced(rq->q) &&
+ blk_is_flush_data_rq(rq) &&
+ blk_mq_request_completed(rq))) {
bool *busy = priv;
*busy = true;
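The new condition reads naturally as a standalone predicate: a started request counts as inflight unless the queue is quiesced and the request is a completed flush data request (RQF_FLUSH_SEQ set, but not the flush request itself). A sketch of that predicate with stubbed fields (all names hypothetical):

#include <stdbool.h>

struct rq {
	bool started;
	bool completed;
	bool flush_seq;		/* part of a flush sequence (RQF_FLUSH_SEQ) */
	bool is_flush_rq;	/* the flush request itself */
	bool queue_quiesced;
};

static bool is_flush_data_rq(const struct rq *rq)
{
	return rq->flush_seq && !rq->is_flush_rq;
}

/* Mirrors the predicate in blk_mq_rq_inflight() above. */
static bool counts_as_inflight(const struct rq *rq)
{
	return rq->started &&
	       !(rq->queue_quiesced && is_flush_data_rq(rq) && rq->completed);
}

int main(void)
{
	struct rq rq = { .started = true, .completed = true,
			 .flush_seq = true, .queue_quiesced = true };

	return counts_as_inflight(&rq);	/* 0: not counted as inflight */
}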
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 63e481262336..0b2d04766324 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -615,6 +615,7 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
#endif
+/* Common attributes for bio-based and request-based queues. */
static struct attribute *queue_attrs[] = {
&queue_ra_entry.attr,
&queue_max_hw_sectors_entry.attr,
@@ -659,6 +660,7 @@ static struct attribute *queue_attrs[] = {
NULL,
};
+/* Request-based queue attributes that are not relevant for bio-based queues. */
static struct attribute *blk_mq_queue_attrs[] = {
&queue_requests_entry.attr,
&elv_iosched_entry.attr,
diff --git a/drivers/accel/drm_accel.c b/drivers/accel/drm_accel.c
index 294b572a9c33..24cac4c0274b 100644
--- a/drivers/accel/drm_accel.c
+++ b/drivers/accel/drm_accel.c
@@ -11,6 +11,7 @@
#include <linux/idr.h>
#include <drm/drm_accel.h>
+#include <drm/drm_auth.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c
index 5d3cc30009cc..832464f2833a 100644
--- a/drivers/accel/qaic/mhi_controller.c
+++ b/drivers/accel/qaic/mhi_controller.c
@@ -469,7 +469,7 @@ static void mhi_status_cb(struct mhi_controller *mhi_cntrl, enum mhi_callback re
pci_err(qdev->pdev, "Fatal error received from device. Attempting to recover\n");
/* this event occurs in non-atomic context */
if (reason == MHI_CB_SYS_ERROR)
- qaic_dev_reset_clean_local_state(qdev, true);
+ qaic_dev_reset_clean_local_state(qdev);
}
static int mhi_reset_and_async_power_up(struct mhi_controller *mhi_cntrl)
diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
index bc40d52dc010..582836f9538f 100644
--- a/drivers/accel/qaic/qaic.h
+++ b/drivers/accel/qaic/qaic.h
@@ -31,6 +31,15 @@
#define to_drm(qddev) (&(qddev)->drm)
#define to_accel_kdev(qddev) (to_drm(qddev)->accel->kdev) /* Return Linux device of accel node */
+enum __packed dev_states {
+ /* Device is offline or will be very soon */
+ QAIC_OFFLINE,
+ /* Device is booting, not clear if it's in a usable state */
+ QAIC_BOOT,
+ /* Device is fully operational */
+ QAIC_ONLINE,
+};
+
extern bool datapath_polling;
struct qaic_user {
@@ -121,8 +130,8 @@ struct qaic_device {
struct workqueue_struct *cntl_wq;
/* Synchronizes all the users of device during cleanup */
struct srcu_struct dev_lock;
- /* true: Device under reset; false: Device not under reset */
- bool in_reset;
+ /* Track the state of the device during resets */
+ enum dev_states dev_state;
/* true: single MSI is used to operate device */
bool single_msi;
/*
@@ -274,7 +283,7 @@ void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id);
void release_dbc(struct qaic_device *qdev, u32 dbc_id);
void wake_all_cntl(struct qaic_device *qdev);
-void qaic_dev_reset_clean_local_state(struct qaic_device *qdev, bool exit_reset);
+void qaic_dev_reset_clean_local_state(struct qaic_device *qdev);
struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c
index 84915824be54..9e8a8cbadf6b 100644
--- a/drivers/accel/qaic/qaic_control.c
+++ b/drivers/accel/qaic/qaic_control.c
@@ -1022,7 +1022,8 @@ static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u
int xfer_count = 0;
int retry_count;
- if (qdev->in_reset) {
+ /* Allow QAIC_BOOT state since we need to check control protocol version */
+ if (qdev->dev_state == QAIC_OFFLINE) {
mutex_unlock(&qdev->cntl_mutex);
return ERR_PTR(-ENODEV);
}
@@ -1306,7 +1307,7 @@ int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
return -ENODEV;
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index 8998c28e566e..cf2898eda7ae 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -690,7 +690,7 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
@@ -749,7 +749,7 @@ int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
@@ -970,7 +970,7 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
@@ -1341,7 +1341,7 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
@@ -1497,7 +1497,7 @@ void irq_polling_work(struct work_struct *work)
rcu_id = srcu_read_lock(&dbc->ch_lock);
while (1) {
- if (dbc->qdev->in_reset) {
+ if (dbc->qdev->dev_state != QAIC_ONLINE) {
srcu_read_unlock(&dbc->ch_lock, rcu_id);
return;
}
@@ -1687,7 +1687,7 @@ int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
@@ -1756,7 +1756,7 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
@@ -1847,7 +1847,7 @@ int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index b12226385003..2a313eb69b12 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -8,6 +8,7 @@
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
+#include <linux/kobject.h>
#include <linux/kref.h>
#include <linux/mhi.h>
#include <linux/module.h>
@@ -43,9 +44,6 @@ MODULE_PARM_DESC(datapath_polling, "Operate the datapath in polling mode");
static bool link_up;
static DEFINE_IDA(qaic_usrs);
-static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id);
-static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id);
-
static void free_usr(struct kref *kref)
{
struct qaic_user *usr = container_of(kref, struct qaic_user, ref_count);
@@ -64,7 +62,7 @@ static int qaic_open(struct drm_device *dev, struct drm_file *file)
int ret;
rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto dev_unlock;
}
@@ -121,7 +119,7 @@ static void qaic_postclose(struct drm_device *dev, struct drm_file *file)
if (qddev) {
qdev = qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (!qdev->in_reset) {
+ if (qdev->dev_state == QAIC_ONLINE) {
qaic_release_usr(qdev, usr);
for (i = 0; i < qdev->num_dbc; ++i)
if (qdev->dbc[i].usr && qdev->dbc[i].usr->handle == usr->handle)
@@ -183,13 +181,6 @@ static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id)
qddev->partition_id = partition_id;
- /*
- * drm_dev_unregister() sets the driver data to NULL and
- * drm_dev_register() does not update the driver data. During a SOC
- * reset drm dev is unregistered and registered again leaving the
- * driver data to NULL.
- */
- dev_set_drvdata(to_accel_kdev(qddev), drm->accel);
ret = drm_dev_register(drm, 0);
if (ret)
pci_dbg(qdev->pdev, "drm_dev_register failed %d\n", ret);
@@ -203,7 +194,6 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
struct drm_device *drm = to_drm(qddev);
struct qaic_user *usr;
- drm_dev_get(drm);
drm_dev_unregister(drm);
qddev->partition_id = 0;
/*
@@ -232,7 +222,6 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
mutex_lock(&qddev->users_mutex);
}
mutex_unlock(&qddev->users_mutex);
- drm_dev_put(drm);
}
static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
@@ -254,8 +243,6 @@ static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
- qdev->in_reset = false;
-
dev_set_drvdata(&mhi_dev->dev, qdev);
qdev->cntl_ch = mhi_dev;
@@ -265,6 +252,7 @@ static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
return ret;
}
+ qdev->dev_state = QAIC_BOOT;
ret = get_cntl_version(qdev, NULL, &major, &minor);
if (ret || major != CNTL_MAJOR || minor > CNTL_MINOR) {
pci_err(qdev->pdev, "%s: Control protocol version (%d.%d) not supported. Supported version is (%d.%d). Ret: %d\n",
@@ -272,8 +260,8 @@ static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
ret = -EINVAL;
goto close_control;
}
-
- ret = qaic_create_drm_device(qdev, QAIC_NO_PARTITION);
+ qdev->dev_state = QAIC_ONLINE;
+ kobject_uevent(&(to_accel_kdev(qdev->qddev))->kobj, KOBJ_ONLINE);
return ret;
@@ -291,7 +279,8 @@ static void qaic_notify_reset(struct qaic_device *qdev)
{
int i;
- qdev->in_reset = true;
+ kobject_uevent(&(to_accel_kdev(qdev->qddev))->kobj, KOBJ_OFFLINE);
+ qdev->dev_state = QAIC_OFFLINE;
/* wake up any waiters to avoid waiting for timeouts at sync */
wake_all_cntl(qdev);
for (i = 0; i < qdev->num_dbc; ++i)
@@ -299,21 +288,15 @@ static void qaic_notify_reset(struct qaic_device *qdev)
synchronize_srcu(&qdev->dev_lock);
}
-void qaic_dev_reset_clean_local_state(struct qaic_device *qdev, bool exit_reset)
+void qaic_dev_reset_clean_local_state(struct qaic_device *qdev)
{
int i;
qaic_notify_reset(qdev);
- /* remove drmdevs to prevent new users from coming in */
- qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
-
/* start tearing things down */
for (i = 0; i < qdev->num_dbc; ++i)
release_dbc(qdev, i);
-
- if (exit_reset)
- qdev->in_reset = false;
}
static void cleanup_qdev(struct qaic_device *qdev)
@@ -338,6 +321,7 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
if (!qdev)
return NULL;
+ qdev->dev_state = QAIC_OFFLINE;
if (id->device == PCI_DEV_AIC100) {
qdev->num_dbc = 16;
qdev->dbc = devm_kcalloc(&pdev->dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
@@ -499,15 +483,21 @@ static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto cleanup_qdev;
}
+ ret = qaic_create_drm_device(qdev, QAIC_NO_PARTITION);
+ if (ret)
+ goto cleanup_qdev;
+
qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_0, mhi_irq,
qdev->single_msi);
if (IS_ERR(qdev->mhi_cntrl)) {
ret = PTR_ERR(qdev->mhi_cntrl);
- goto cleanup_qdev;
+ goto cleanup_drm_dev;
}
return 0;
+cleanup_drm_dev:
+ qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
cleanup_qdev:
cleanup_qdev(qdev);
return ret;
@@ -520,7 +510,8 @@ static void qaic_pci_remove(struct pci_dev *pdev)
if (!qdev)
return;
- qaic_dev_reset_clean_local_state(qdev, false);
+ qaic_dev_reset_clean_local_state(qdev);
+ qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
qaic_mhi_free_controller(qdev->mhi_cntrl, link_up);
cleanup_qdev(qdev);
}
@@ -543,14 +534,13 @@ static void qaic_pci_reset_prepare(struct pci_dev *pdev)
qaic_notify_reset(qdev);
qaic_mhi_start_reset(qdev->mhi_cntrl);
- qaic_dev_reset_clean_local_state(qdev, false);
+ qaic_dev_reset_clean_local_state(qdev);
}
static void qaic_pci_reset_done(struct pci_dev *pdev)
{
struct qaic_device *qdev = pci_get_drvdata(pdev);
- qdev->in_reset = false;
qaic_mhi_reset_done(qdev->mhi_cntrl);
}
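Replacing the in_reset bool with a three-state enum is what lets the probe path above talk to the device before user ioctls are allowed: msg_xfer() only rejects QAIC_OFFLINE, so the control-protocol version handshake can run in QAIC_BOOT, while every user-facing ioctl demands QAIC_ONLINE. The two gates in miniature (a sketch; the real checks additionally run under SRCU):

#include <stdbool.h>

enum dev_state { DEV_OFFLINE, DEV_BOOT, DEV_ONLINE };

/* Control messages are allowed while booting (version handshake). */
static bool control_allowed(enum dev_state s)
{
	return s != DEV_OFFLINE;
}

/* User-facing ioctls need the device fully up. */
static bool ioctl_allowed(enum dev_state s)
{
	return s == DEV_ONLINE;
}

int main(void)
{
	enum dev_state s = DEV_BOOT;

	return control_allowed(s) && !ioctl_allowed(s) ? 0 : 1;
}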
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index d321ca7160d9..6cee536c229a 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -253,8 +253,7 @@ static const struct backlight_ops acpi_backlight_ops = {
static int video_get_max_state(struct thermal_cooling_device *cooling_dev,
unsigned long *state)
{
- struct acpi_device *device = cooling_dev->devdata;
- struct acpi_video_device *video = acpi_driver_data(device);
+ struct acpi_video_device *video = cooling_dev->devdata;
*state = video->brightness->count - ACPI_VIDEO_FIRST_LEVEL - 1;
return 0;
@@ -263,8 +262,7 @@ static int video_get_max_state(struct thermal_cooling_device *cooling_dev,
static int video_get_cur_state(struct thermal_cooling_device *cooling_dev,
unsigned long *state)
{
- struct acpi_device *device = cooling_dev->devdata;
- struct acpi_video_device *video = acpi_driver_data(device);
+ struct acpi_video_device *video = cooling_dev->devdata;
unsigned long long level;
int offset;
@@ -283,8 +281,7 @@ static int video_get_cur_state(struct thermal_cooling_device *cooling_dev,
static int
video_set_cur_state(struct thermal_cooling_device *cooling_dev, unsigned long state)
{
- struct acpi_device *device = cooling_dev->devdata;
- struct acpi_video_device *video = acpi_driver_data(device);
+ struct acpi_video_device *video = cooling_dev->devdata;
int level;
if (state >= video->brightness->count - ACPI_VIDEO_FIRST_LEVEL)
@@ -1125,7 +1122,6 @@ static int acpi_video_bus_get_one_device(struct acpi_device *device, void *arg)
strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
- device->driver_data = data;
data->device_id = device_id;
data->video = video;
@@ -1747,8 +1743,8 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
device->backlight->props.brightness =
acpi_video_get_brightness(device->backlight);
- device->cooling_dev = thermal_cooling_device_register("LCD",
- device->dev, &video_cooling_ops);
+ device->cooling_dev = thermal_cooling_device_register("LCD", device,
+ &video_cooling_ops);
if (IS_ERR(device->cooling_dev)) {
/*
* Set cooling_dev to NULL so we don't crash trying to free it.
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index fa5dd71a80fa..02bb2cce423f 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1568,17 +1568,22 @@ static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
int err;
const struct iommu_ops *ops;
+ /* Serialise to make dev->iommu stable under our potential fwspec */
+ mutex_lock(&iommu_probe_device_lock);
/*
* If we already translated the fwspec there is nothing left to do,
* return the iommu_ops.
*/
ops = acpi_iommu_fwspec_ops(dev);
- if (ops)
+ if (ops) {
+ mutex_unlock(&iommu_probe_device_lock);
return ops;
+ }
err = iort_iommu_configure_id(dev, id_in);
if (err && err != -EPROBE_DEFER)
err = viot_iommu_configure(dev);
+ mutex_unlock(&iommu_probe_device_lock);
/*
* If we have reason to believe the IOMMU driver missed the initial
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 28c75242fca9..62944e35fcee 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -399,13 +399,13 @@ acpi_evaluate_reference(acpi_handle handle,
acpi_handle_debug(list->handles[i], "Found in reference list\n");
}
-end:
if (ACPI_FAILURE(status)) {
list->count = 0;
kfree(list->handles);
list->handles = NULL;
}
+end:
kfree(buffer.pointer);
return status;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index c10ff8985203..0a0f483124c3 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1055,9 +1055,14 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
* Ask the sd driver to issue START STOP UNIT on runtime suspend
* and resume and shutdown only. For system level suspend/resume,
* devices power state is handled directly by libata EH.
+ * Given that disks are always spun up on system resume, also
+ * make sure that the sd driver forces runtime suspended disks
+ * to be resumed to correctly reflect the power state of the
+ * device.
*/
- sdev->manage_runtime_start_stop = true;
- sdev->manage_shutdown = true;
+ sdev->manage_runtime_start_stop = 1;
+ sdev->manage_shutdown = 1;
+ sdev->force_runtime_start_on_system_start = 1;
}
/*
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 64012cda4d12..d944d5298eca 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -112,10 +112,7 @@ config CFAG12864B
depends on X86
depends on FB
depends on KS0108
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
+ select FB_SYSMEM_HELPERS
default n
help
If you have a Crystalfontz 128x64 2-color LCD, cfag12864b Series,
@@ -170,10 +167,7 @@ config IMG_ASCII_LCD
config HT16K33
tristate "Holtek Ht16K33 LED controller with keyscan"
depends on FB && I2C && INPUT
- select FB_SYS_FOPS
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
+ select FB_SYSMEM_HELPERS
select INPUT_MATRIXKMAP
select FB_BACKLIGHT
select NEW_LEDS
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index 729845bcc803..5ba19c339f08 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -51,16 +51,15 @@ static int cfag12864bfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct page *pages = virt_to_page(cfag12864b_buffer);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
return vm_map_pages_zero(vma, &pages, 1);
}
static const struct fb_ops cfag12864bfb_ops = {
.owner = THIS_MODULE,
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
+ __FB_DEFAULT_SYSMEM_OPS_RDWR,
+ __FB_DEFAULT_SYSMEM_OPS_DRAW,
.fb_mmap = cfag12864bfb_mmap,
};
@@ -72,6 +71,7 @@ static int cfag12864bfb_probe(struct platform_device *device)
if (!info)
goto none;
+ info->flags = FBINFO_VIRTFB;
info->screen_buffer = cfag12864b_buffer;
info->screen_size = CFAG12864B_SIZE;
info->fbops = &cfag12864bfb_ops;
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index 3a2d88387224..a90430b7d07b 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -351,17 +351,16 @@ static int ht16k33_mmap(struct fb_info *info, struct vm_area_struct *vma)
struct ht16k33_priv *priv = info->par;
struct page *pages = virt_to_page(priv->fbdev.buffer);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
return vm_map_pages_zero(vma, &pages, 1);
}
static const struct fb_ops ht16k33_fb_ops = {
.owner = THIS_MODULE,
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
+ __FB_DEFAULT_SYSMEM_OPS_RDWR,
.fb_blank = ht16k33_blank,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
+ __FB_DEFAULT_SYSMEM_OPS_DRAW,
.fb_mmap = ht16k33_mmap,
};
@@ -640,6 +639,7 @@ static int ht16k33_fbdev_probe(struct device *dev, struct ht16k33_priv *priv,
INIT_DELAYED_WORK(&priv->work, ht16k33_fb_update);
fbdev->info->fbops = &ht16k33_fb_ops;
+ fbdev->info->flags |= FBINFO_VIRTFB;
fbdev->info->screen_buffer = fbdev->buffer;
fbdev->info->screen_size = HT16K33_FB_SIZE;
fbdev->info->fix = ht16k33_fb_fix;
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 9ea22e165acd..548491de818e 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -144,7 +144,7 @@ static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
#endif /* CONFIG_HOTPLUG_CPU */
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
#include <linux/kexec.h>
static ssize_t crash_notes_show(struct device *dev,
@@ -189,14 +189,14 @@ static const struct attribute_group crash_note_cpu_attr_group = {
#endif
static const struct attribute_group *common_cpu_attr_groups[] = {
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
&crash_note_cpu_attr_group,
#endif
NULL
};
static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
&crash_note_cpu_attr_group,
#endif
NULL
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 91536ee05f14..7e2d1f0d903a 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -362,6 +362,7 @@ void dev_coredumpm(struct device *dev, struct module *owner,
devcd->devcd_dev.class = &devcd_class;
mutex_lock(&devcd->mutex);
+ dev_set_uevent_suppress(&devcd->devcd_dev, true);
if (device_add(&devcd->devcd_dev))
goto put_device;
@@ -376,6 +377,8 @@ void dev_coredumpm(struct device *dev, struct module *owner,
"devcoredump"))
dev_warn(dev, "devcoredump create_link failed\n");
+ dev_set_uevent_suppress(&devcd->devcd_dev, false);
+ kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
mutex_unlock(&devcd->mutex);
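The ordering here is the standard cure for userspace racing a half-created device: suppress uevents before device_add(), create the sysfs pieces, then unsuppress and emit KOBJ_ADD by hand so udev only ever sees the finished device. A condensed kernel-style sketch of the sequence (not the actual devcoredump code; the populate callback stands in for the attribute and link creation):

#include <linux/device.h>
#include <linux/kobject.h>

static int add_device_quietly(struct device *dev,
			      int (*populate)(struct device *dev))
{
	int err;

	dev_set_uevent_suppress(dev, true);	/* hold back KOBJ_ADD */

	err = device_add(dev);			/* no uevent fires yet */
	if (err)
		return err;

	err = populate(dev);			/* attributes, links, ... */
	if (err) {
		device_del(dev);
		return err;
	}

	dev_set_uevent_suppress(dev, false);
	kobject_uevent(&dev->kobj, KOBJ_ADD);	/* announce finished device */
	return 0;
}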
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index f3b9a4d0fa3b..8a13babd826c 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -180,6 +180,9 @@ static inline unsigned long memblk_nr_poison(struct memory_block *mem)
}
#endif
+/*
+ * Must acquire mem_hotplug_lock in write mode.
+ */
static int memory_block_online(struct memory_block *mem)
{
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
@@ -204,10 +207,11 @@ static int memory_block_online(struct memory_block *mem)
if (mem->altmap)
nr_vmemmap_pages = mem->altmap->free;
+ mem_hotplug_begin();
if (nr_vmemmap_pages) {
ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
if (ret)
- return ret;
+ goto out;
}
ret = online_pages(start_pfn + nr_vmemmap_pages,
@@ -215,7 +219,7 @@ static int memory_block_online(struct memory_block *mem)
if (ret) {
if (nr_vmemmap_pages)
mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
- return ret;
+ goto out;
}
/*
@@ -227,9 +231,14 @@ static int memory_block_online(struct memory_block *mem)
nr_vmemmap_pages);
mem->zone = zone;
+out:
+ mem_hotplug_done();
return ret;
}
+/*
+ * Must acquire mem_hotplug_lock in write mode.
+ */
static int memory_block_offline(struct memory_block *mem)
{
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
@@ -247,6 +256,7 @@ static int memory_block_offline(struct memory_block *mem)
if (mem->altmap)
nr_vmemmap_pages = mem->altmap->free;
+ mem_hotplug_begin();
if (nr_vmemmap_pages)
adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
-nr_vmemmap_pages);
@@ -258,13 +268,15 @@ static int memory_block_offline(struct memory_block *mem)
if (nr_vmemmap_pages)
adjust_present_page_count(pfn_to_page(start_pfn),
mem->group, nr_vmemmap_pages);
- return ret;
+ goto out;
}
if (nr_vmemmap_pages)
mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
mem->zone = NULL;
+out:
+ mem_hotplug_done();
return ret;
}
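Both the online and offline paths now take mem_hotplug_lock in write mode themselves, which forces the single-exit style: every early return becomes a goto out so mem_hotplug_done() always runs. The shape of it, reduced to a pthread mutex (hypothetical step functions):

#include <pthread.h>

static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;

static int do_step1(void) { return 0; }	/* stand-ins for the real work */
static int do_step2(void) { return 0; }

static int block_online(void)
{
	int ret;

	pthread_mutex_lock(&hotplug_lock);

	ret = do_step1();
	if (ret)
		goto out;	/* an early failure still unlocks */

	ret = do_step2();
out:
	pthread_mutex_unlock(&hotplug_lock);
	return ret;
}

int main(void)
{
	return block_online();
}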
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 92592f944a3d..ac63a73ccdaa 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -410,8 +410,7 @@ out:
rb_entry(node, struct regmap_range_node, node);
/* If there's nothing in the cache there's nothing to sync */
- ret = regcache_read(map, this->selector_reg, &i);
- if (ret != 0)
+ if (regcache_read(map, this->selector_reg, &i) != 0)
continue;
ret = _regmap_write(map, this->selector_reg, i);
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
index 25834557e486..43b09cf193bb 100644
--- a/drivers/char/agp/Makefile
+++ b/drivers/char/agp/Makefile
@@ -1,12 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
agpgart-y := backend.o generic.o isoch.o
-ifeq ($(CONFIG_DRM_LEGACY),y)
-agpgart-$(CONFIG_COMPAT) += compat_ioctl.o
-agpgart-y += frontend.o
-endif
-
-
obj-$(CONFIG_AGP) += agpgart.o
obj-$(CONFIG_AGP_ALI) += ali-agp.o
obj-$(CONFIG_AGP_ATI) += ati-agp.o
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 8771dcc9b8e2..5c36ab85f80b 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -185,15 +185,6 @@ void agp_put_bridge(struct agp_bridge_data *bridge);
int agp_add_bridge(struct agp_bridge_data *bridge);
void agp_remove_bridge(struct agp_bridge_data *bridge);
-/* Frontend routines. */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int agp_frontend_initialize(void);
-void agp_frontend_cleanup(void);
-#else
-static inline int agp_frontend_initialize(void) { return 0; }
-static inline void agp_frontend_cleanup(void) {}
-#endif
-
/* Generic routines. */
void agp_generic_enable(struct agp_bridge_data *bridge, u32 mode);
int agp_generic_create_gatt_table(struct agp_bridge_data *bridge);
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 0e19c600db53..1776afd3ee07 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -293,13 +293,6 @@ int agp_add_bridge(struct agp_bridge_data *bridge)
}
if (list_empty(&agp_bridges)) {
- error = agp_frontend_initialize();
- if (error) {
- dev_info(&bridge->dev->dev,
- "agp_frontend_initialize() failed\n");
- goto frontend_err;
- }
-
dev_info(&bridge->dev->dev, "AGP aperture is %dM @ 0x%lx\n",
bridge->driver->fetch_size(), bridge->gart_bus_addr);
@@ -308,8 +301,6 @@ int agp_add_bridge(struct agp_bridge_data *bridge)
list_add(&bridge->list, &agp_bridges);
return 0;
-frontend_err:
- agp_backend_cleanup(bridge);
err_out:
module_put(bridge->driver->owner);
err_put_bridge:
@@ -323,8 +314,6 @@ void agp_remove_bridge(struct agp_bridge_data *bridge)
{
agp_backend_cleanup(bridge);
list_del(&bridge->list);
- if (list_empty(&agp_bridges))
- agp_frontend_cleanup();
module_put(bridge->driver->owner);
}
EXPORT_SYMBOL_GPL(agp_remove_bridge);
diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
deleted file mode 100644
index 52ffe1706ce0..000000000000
--- a/drivers/char/agp/compat_ioctl.c
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- * AGPGART driver frontend compatibility ioctls
- * Copyright (C) 2004 Silicon Graphics, Inc.
- * Copyright (C) 2002-2003 Dave Jones
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight, Inc.
- * Copyright (C) 1999 Xi Graphics, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <linux/agpgart.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include "agp.h"
-#include "compat_ioctl.h"
-
-static int compat_agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_info32 userinfo;
- struct agp_kern_info kerninfo;
-
- agp_copy_info(agp_bridge, &kerninfo);
-
- userinfo.version.major = kerninfo.version.major;
- userinfo.version.minor = kerninfo.version.minor;
- userinfo.bridge_id = kerninfo.device->vendor |
- (kerninfo.device->device << 16);
- userinfo.agp_mode = kerninfo.mode;
- userinfo.aper_base = (compat_long_t)kerninfo.aper_base;
- userinfo.aper_size = kerninfo.aper_size;
- userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory;
- userinfo.pg_used = kerninfo.current_memory;
-
- if (copy_to_user(arg, &userinfo, sizeof(userinfo)))
- return -EFAULT;
-
- return 0;
-}
-
-static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_region32 ureserve;
- struct agp_region kreserve;
- struct agp_client *client;
- struct agp_file_private *client_priv;
-
- DBG("");
- if (copy_from_user(&ureserve, arg, sizeof(ureserve)))
- return -EFAULT;
-
- if ((unsigned) ureserve.seg_count >= ~0U/sizeof(struct agp_segment32))
- return -EFAULT;
-
- kreserve.pid = ureserve.pid;
- kreserve.seg_count = ureserve.seg_count;
-
- client = agp_find_client_by_pid(kreserve.pid);
-
- if (kreserve.seg_count == 0) {
- /* remove a client */
- client_priv = agp_find_private(kreserve.pid);
-
- if (client_priv != NULL) {
- set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
- set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
- }
- if (client == NULL) {
- /* client is already removed */
- return 0;
- }
- return agp_remove_client(kreserve.pid);
- } else {
- struct agp_segment32 *usegment;
- struct agp_segment *ksegment;
- int seg;
-
- if (ureserve.seg_count >= 16384)
- return -EINVAL;
-
- usegment = kmalloc_array(ureserve.seg_count,
- sizeof(*usegment),
- GFP_KERNEL);
- if (!usegment)
- return -ENOMEM;
-
- ksegment = kmalloc_array(kreserve.seg_count,
- sizeof(*ksegment),
- GFP_KERNEL);
- if (!ksegment) {
- kfree(usegment);
- return -ENOMEM;
- }
-
- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
- sizeof(*usegment) * ureserve.seg_count)) {
- kfree(usegment);
- kfree(ksegment);
- return -EFAULT;
- }
-
- for (seg = 0; seg < ureserve.seg_count; seg++) {
- ksegment[seg].pg_start = usegment[seg].pg_start;
- ksegment[seg].pg_count = usegment[seg].pg_count;
- ksegment[seg].prot = usegment[seg].prot;
- }
-
- kfree(usegment);
- kreserve.seg_list = ksegment;
-
- if (client == NULL) {
- /* Create the client and add the segment */
- client = agp_create_client(kreserve.pid);
-
- if (client == NULL) {
- kfree(ksegment);
- return -ENOMEM;
- }
- client_priv = agp_find_private(kreserve.pid);
-
- if (client_priv != NULL) {
- set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
- set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
- }
- }
- return agp_create_segment(client, &kreserve);
- }
- /* Will never really happen */
- return -EINVAL;
-}
-
-static int compat_agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_memory *memory;
- struct agp_allocate32 alloc;
-
- DBG("");
- if (copy_from_user(&alloc, arg, sizeof(alloc)))
- return -EFAULT;
-
- memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
-
- if (memory == NULL)
- return -ENOMEM;
-
- alloc.key = memory->key;
- alloc.physical = memory->physical;
-
- if (copy_to_user(arg, &alloc, sizeof(alloc))) {
- agp_free_memory_wrap(memory);
- return -EFAULT;
- }
- return 0;
-}
-
-static int compat_agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_bind32 bind_info;
- struct agp_memory *memory;
-
- DBG("");
- if (copy_from_user(&bind_info, arg, sizeof(bind_info)))
- return -EFAULT;
-
- memory = agp_find_mem_by_key(bind_info.key);
-
- if (memory == NULL)
- return -EINVAL;
-
- return agp_bind_memory(memory, bind_info.pg_start);
-}
-
-static int compat_agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_memory *memory;
- struct agp_unbind32 unbind;
-
- DBG("");
- if (copy_from_user(&unbind, arg, sizeof(unbind)))
- return -EFAULT;
-
- memory = agp_find_mem_by_key(unbind.key);
-
- if (memory == NULL)
- return -EINVAL;
-
- return agp_unbind_memory(memory);
-}
-
-long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct agp_file_private *curr_priv = file->private_data;
- int ret_val = -ENOTTY;
-
- mutex_lock(&(agp_fe.agp_mutex));
-
- if ((agp_fe.current_controller == NULL) &&
- (cmd != AGPIOC_ACQUIRE32)) {
- ret_val = -EINVAL;
- goto ioctl_out;
- }
- if ((agp_fe.backend_acquired != true) &&
- (cmd != AGPIOC_ACQUIRE32)) {
- ret_val = -EBUSY;
- goto ioctl_out;
- }
- if (cmd != AGPIOC_ACQUIRE32) {
- if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) {
- ret_val = -EPERM;
- goto ioctl_out;
- }
- /* Use the original pid of the controller,
- * in case it's threaded */
-
- if (agp_fe.current_controller->pid != curr_priv->my_pid) {
- ret_val = -EBUSY;
- goto ioctl_out;
- }
- }
-
- switch (cmd) {
- case AGPIOC_INFO32:
- ret_val = compat_agpioc_info_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_ACQUIRE32:
- ret_val = agpioc_acquire_wrap(curr_priv);
- break;
-
- case AGPIOC_RELEASE32:
- ret_val = agpioc_release_wrap(curr_priv);
- break;
-
- case AGPIOC_SETUP32:
- ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_RESERVE32:
- ret_val = compat_agpioc_reserve_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_PROTECT32:
- ret_val = agpioc_protect_wrap(curr_priv);
- break;
-
- case AGPIOC_ALLOCATE32:
- ret_val = compat_agpioc_allocate_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_DEALLOCATE32:
- ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg);
- break;
-
- case AGPIOC_BIND32:
- ret_val = compat_agpioc_bind_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_UNBIND32:
- ret_val = compat_agpioc_unbind_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_CHIPSET_FLUSH32:
- break;
- }
-
-ioctl_out:
- DBG("ioctl returns %d\n", ret_val);
- mutex_unlock(&(agp_fe.agp_mutex));
- return ret_val;
-}
-
diff --git a/drivers/char/agp/compat_ioctl.h b/drivers/char/agp/compat_ioctl.h
deleted file mode 100644
index f30e0fd97963..000000000000
--- a/drivers/char/agp/compat_ioctl.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight, Inc.
- * Copyright (C) 1999 Xi Graphics, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _AGP_COMPAT_IOCTL_H
-#define _AGP_COMPAT_IOCTL_H
-
-#include <linux/compat.h>
-#include <linux/agpgart.h>
-
-#define AGPIOC_INFO32 _IOR (AGPIOC_BASE, 0, compat_uptr_t)
-#define AGPIOC_ACQUIRE32 _IO (AGPIOC_BASE, 1)
-#define AGPIOC_RELEASE32 _IO (AGPIOC_BASE, 2)
-#define AGPIOC_SETUP32 _IOW (AGPIOC_BASE, 3, compat_uptr_t)
-#define AGPIOC_RESERVE32 _IOW (AGPIOC_BASE, 4, compat_uptr_t)
-#define AGPIOC_PROTECT32 _IOW (AGPIOC_BASE, 5, compat_uptr_t)
-#define AGPIOC_ALLOCATE32 _IOWR(AGPIOC_BASE, 6, compat_uptr_t)
-#define AGPIOC_DEALLOCATE32 _IOW (AGPIOC_BASE, 7, compat_int_t)
-#define AGPIOC_BIND32 _IOW (AGPIOC_BASE, 8, compat_uptr_t)
-#define AGPIOC_UNBIND32 _IOW (AGPIOC_BASE, 9, compat_uptr_t)
-#define AGPIOC_CHIPSET_FLUSH32 _IO (AGPIOC_BASE, 10)
-
-struct agp_info32 {
- struct agp_version version; /* version of the driver */
- u32 bridge_id; /* bridge vendor/device */
- u32 agp_mode; /* mode info of bridge */
- compat_long_t aper_base; /* base of aperture */
- compat_size_t aper_size; /* size of aperture */
- compat_size_t pg_total; /* max pages (swap + system) */
- compat_size_t pg_system; /* max pages (system) */
- compat_size_t pg_used; /* current pages used */
-};
-
-/*
- * The "prot" down below needs still a "sleep" flag somehow ...
- */
-struct agp_segment32 {
- compat_off_t pg_start; /* starting page to populate */
- compat_size_t pg_count; /* number of pages */
- compat_int_t prot; /* prot flags for mmap */
-};
-
-struct agp_region32 {
- compat_pid_t pid; /* pid of process */
- compat_size_t seg_count; /* number of segments */
- struct agp_segment32 *seg_list;
-};
-
-struct agp_allocate32 {
- compat_int_t key; /* tag of allocation */
- compat_size_t pg_count; /* number of pages */
- u32 type; /* 0 == normal, other devspec */
- u32 physical; /* device specific (some devices
- * need a phys address of the
- * actual page behind the gatt
- * table) */
-};
-
-struct agp_bind32 {
- compat_int_t key; /* tag of allocation */
- compat_off_t pg_start; /* starting page to populate */
-};
-
-struct agp_unbind32 {
- compat_int_t key; /* tag of allocation */
- u32 priority; /* priority for paging out */
-};
-
-extern struct agp_front_data agp_fe;
-
-int agpioc_acquire_wrap(struct agp_file_private *priv);
-int agpioc_release_wrap(struct agp_file_private *priv);
-int agpioc_protect_wrap(struct agp_file_private *priv);
-int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg);
-int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg);
-struct agp_file_private *agp_find_private(pid_t pid);
-struct agp_client *agp_create_client(pid_t id);
-int agp_remove_client(pid_t id);
-int agp_create_segment(struct agp_client *client, struct agp_region *region);
-void agp_free_memory_wrap(struct agp_memory *memory);
-struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type);
-struct agp_memory *agp_find_mem_by_key(int key);
-struct agp_client *agp_find_client_by_pid(pid_t id);
-
-#endif /* _AGP_COMPAT_H */
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
deleted file mode 100644
index 321118a9cfa5..000000000000
--- a/drivers/char/agp/frontend.c
+++ /dev/null
@@ -1,1068 +0,0 @@
-/*
- * AGPGART driver frontend
- * Copyright (C) 2004 Silicon Graphics, Inc.
- * Copyright (C) 2002-2003 Dave Jones
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight, Inc.
- * Copyright (C) 1999 Xi Graphics, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mman.h>
-#include <linux/pci.h>
-#include <linux/miscdevice.h>
-#include <linux/agp_backend.h>
-#include <linux/agpgart.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/sched.h>
-#include <linux/uaccess.h>
-
-#include "agp.h"
-#include "compat_ioctl.h"
-
-struct agp_front_data agp_fe;
-
-struct agp_memory *agp_find_mem_by_key(int key)
-{
- struct agp_memory *curr;
-
- if (agp_fe.current_controller == NULL)
- return NULL;
-
- curr = agp_fe.current_controller->pool;
-
- while (curr != NULL) {
- if (curr->key == key)
- break;
- curr = curr->next;
- }
-
- DBG("key=%d -> mem=%p", key, curr);
- return curr;
-}
-
-static void agp_remove_from_pool(struct agp_memory *temp)
-{
- struct agp_memory *prev;
- struct agp_memory *next;
-
- /* Check to see if this is even in the memory pool */
-
- DBG("mem=%p", temp);
- if (agp_find_mem_by_key(temp->key) != NULL) {
- next = temp->next;
- prev = temp->prev;
-
- if (prev != NULL) {
- prev->next = next;
- if (next != NULL)
- next->prev = prev;
-
- } else {
- /* This is the first item on the list */
- if (next != NULL)
- next->prev = NULL;
-
- agp_fe.current_controller->pool = next;
- }
- }
-}
-
-/*
- * Routines for managing each client's segment list -
- * These routines handle adding and removing segments
- * to each auth'ed client.
- */
-
-static struct
-agp_segment_priv *agp_find_seg_in_client(const struct agp_client *client,
- unsigned long offset,
- int size, pgprot_t page_prot)
-{
- struct agp_segment_priv *seg;
- int i;
- off_t pg_start;
- size_t pg_count;
-
- pg_start = offset / 4096;
- pg_count = size / 4096;
- seg = *(client->segments);
-
- for (i = 0; i < client->num_segments; i++) {
- if ((seg[i].pg_start == pg_start) &&
- (seg[i].pg_count == pg_count) &&
- (pgprot_val(seg[i].prot) == pgprot_val(page_prot))) {
- return seg + i;
- }
- }
-
- return NULL;
-}
-
-static void agp_remove_seg_from_client(struct agp_client *client)
-{
- DBG("client=%p", client);
-
- if (client->segments != NULL) {
- if (*(client->segments) != NULL) {
- DBG("Freeing %p from client %p", *(client->segments), client);
- kfree(*(client->segments));
- }
- DBG("Freeing %p from client %p", client->segments, client);
- kfree(client->segments);
- client->segments = NULL;
- }
-}
-
-static void agp_add_seg_to_client(struct agp_client *client,
- struct agp_segment_priv ** seg, int num_segments)
-{
- struct agp_segment_priv **prev_seg;
-
- prev_seg = client->segments;
-
- if (prev_seg != NULL)
- agp_remove_seg_from_client(client);
-
- DBG("Adding seg %p (%d segments) to client %p", seg, num_segments, client);
- client->num_segments = num_segments;
- client->segments = seg;
-}
-
-static pgprot_t agp_convert_mmap_flags(int prot)
-{
- unsigned long prot_bits;
-
- prot_bits = calc_vm_prot_bits(prot, 0) | VM_SHARED;
- return vm_get_page_prot(prot_bits);
-}
-
-int agp_create_segment(struct agp_client *client, struct agp_region *region)
-{
- struct agp_segment_priv **ret_seg;
- struct agp_segment_priv *seg;
- struct agp_segment *user_seg;
- size_t i;
-
- seg = kzalloc((sizeof(struct agp_segment_priv) * region->seg_count), GFP_KERNEL);
- if (seg == NULL) {
- kfree(region->seg_list);
- region->seg_list = NULL;
- return -ENOMEM;
- }
- user_seg = region->seg_list;
-
- for (i = 0; i < region->seg_count; i++) {
- seg[i].pg_start = user_seg[i].pg_start;
- seg[i].pg_count = user_seg[i].pg_count;
- seg[i].prot = agp_convert_mmap_flags(user_seg[i].prot);
- }
- kfree(region->seg_list);
- region->seg_list = NULL;
-
- ret_seg = kmalloc(sizeof(void *), GFP_KERNEL);
- if (ret_seg == NULL) {
- kfree(seg);
- return -ENOMEM;
- }
- *ret_seg = seg;
- agp_add_seg_to_client(client, ret_seg, region->seg_count);
- return 0;
-}
-
-/* End - Routines for managing each client's segment list */
-
-/* This function must only be called when current_controller != NULL */
-static void agp_insert_into_pool(struct agp_memory * temp)
-{
- struct agp_memory *prev;
-
- prev = agp_fe.current_controller->pool;
-
- if (prev != NULL) {
- prev->prev = temp;
- temp->next = prev;
- }
- agp_fe.current_controller->pool = temp;
-}
-
-
-/* File private list routines */
-
-struct agp_file_private *agp_find_private(pid_t pid)
-{
- struct agp_file_private *curr;
-
- curr = agp_fe.file_priv_list;
-
- while (curr != NULL) {
- if (curr->my_pid == pid)
- return curr;
- curr = curr->next;
- }
-
- return NULL;
-}
-
-static void agp_insert_file_private(struct agp_file_private * priv)
-{
- struct agp_file_private *prev;
-
- prev = agp_fe.file_priv_list;
-
- if (prev != NULL)
- prev->prev = priv;
- priv->next = prev;
- agp_fe.file_priv_list = priv;
-}
-
-static void agp_remove_file_private(struct agp_file_private * priv)
-{
- struct agp_file_private *next;
- struct agp_file_private *prev;
-
- next = priv->next;
- prev = priv->prev;
-
- if (prev != NULL) {
- prev->next = next;
-
- if (next != NULL)
- next->prev = prev;
-
- } else {
- if (next != NULL)
- next->prev = NULL;
-
- agp_fe.file_priv_list = next;
- }
-}
-
-/* End - File flag list routines */
-
-/*
- * Wrappers for agp_free_memory & agp_allocate_memory
- * These make sure that internal lists are kept updated.
- */
-void agp_free_memory_wrap(struct agp_memory *memory)
-{
- agp_remove_from_pool(memory);
- agp_free_memory(memory);
-}
-
-struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type)
-{
- struct agp_memory *memory;
-
- memory = agp_allocate_memory(agp_bridge, pg_count, type);
- if (memory == NULL)
- return NULL;
-
- agp_insert_into_pool(memory);
- return memory;
-}
-
-/* Routines for managing the list of controllers -
- * These routines manage the current controller, and the list of
- * controllers
- */
-
-static struct agp_controller *agp_find_controller_by_pid(pid_t id)
-{
- struct agp_controller *controller;
-
- controller = agp_fe.controllers;
-
- while (controller != NULL) {
- if (controller->pid == id)
- return controller;
- controller = controller->next;
- }
-
- return NULL;
-}
-
-static struct agp_controller *agp_create_controller(pid_t id)
-{
- struct agp_controller *controller;
-
- controller = kzalloc(sizeof(struct agp_controller), GFP_KERNEL);
- if (controller == NULL)
- return NULL;
-
- controller->pid = id;
- return controller;
-}
-
-static int agp_insert_controller(struct agp_controller *controller)
-{
- struct agp_controller *prev_controller;
-
- prev_controller = agp_fe.controllers;
- controller->next = prev_controller;
-
- if (prev_controller != NULL)
- prev_controller->prev = controller;
-
- agp_fe.controllers = controller;
-
- return 0;
-}
-
-static void agp_remove_all_clients(struct agp_controller *controller)
-{
- struct agp_client *client;
- struct agp_client *temp;
-
- client = controller->clients;
-
- while (client) {
- struct agp_file_private *priv;
-
- temp = client;
- agp_remove_seg_from_client(temp);
- priv = agp_find_private(temp->pid);
-
- if (priv != NULL) {
- clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
- clear_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
- }
- client = client->next;
- kfree(temp);
- }
-}
-
-static void agp_remove_all_memory(struct agp_controller *controller)
-{
- struct agp_memory *memory;
- struct agp_memory *temp;
-
- memory = controller->pool;
-
- while (memory) {
- temp = memory;
- memory = memory->next;
- agp_free_memory_wrap(temp);
- }
-}
-
-static int agp_remove_controller(struct agp_controller *controller)
-{
- struct agp_controller *prev_controller;
- struct agp_controller *next_controller;
-
- prev_controller = controller->prev;
- next_controller = controller->next;
-
- if (prev_controller != NULL) {
- prev_controller->next = next_controller;
- if (next_controller != NULL)
- next_controller->prev = prev_controller;
-
- } else {
- if (next_controller != NULL)
- next_controller->prev = NULL;
-
- agp_fe.controllers = next_controller;
- }
-
- agp_remove_all_memory(controller);
- agp_remove_all_clients(controller);
-
- if (agp_fe.current_controller == controller) {
- agp_fe.current_controller = NULL;
- agp_fe.backend_acquired = false;
- agp_backend_release(agp_bridge);
- }
- kfree(controller);
- return 0;
-}
-
-static void agp_controller_make_current(struct agp_controller *controller)
-{
- struct agp_client *clients;
-
- clients = controller->clients;
-
- while (clients != NULL) {
- struct agp_file_private *priv;
-
- priv = agp_find_private(clients->pid);
-
- if (priv != NULL) {
- set_bit(AGP_FF_IS_VALID, &priv->access_flags);
- set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
- }
- clients = clients->next;
- }
-
- agp_fe.current_controller = controller;
-}
-
-static void agp_controller_release_current(struct agp_controller *controller,
- struct agp_file_private *controller_priv)
-{
- struct agp_client *clients;
-
- clear_bit(AGP_FF_IS_VALID, &controller_priv->access_flags);
- clients = controller->clients;
-
- while (clients != NULL) {
- struct agp_file_private *priv;
-
- priv = agp_find_private(clients->pid);
-
- if (priv != NULL)
- clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
-
- clients = clients->next;
- }
-
- agp_fe.current_controller = NULL;
- agp_fe.used_by_controller = false;
- agp_backend_release(agp_bridge);
-}
-
-/*
- * Routines for managing client lists -
- * These routines are for managing the list of auth'ed clients.
- */
-
-static struct agp_client
-*agp_find_client_in_controller(struct agp_controller *controller, pid_t id)
-{
- struct agp_client *client;
-
- if (controller == NULL)
- return NULL;
-
- client = controller->clients;
-
- while (client != NULL) {
- if (client->pid == id)
- return client;
- client = client->next;
- }
-
- return NULL;
-}
-
-static struct agp_controller *agp_find_controller_for_client(pid_t id)
-{
- struct agp_controller *controller;
-
- controller = agp_fe.controllers;
-
- while (controller != NULL) {
- if ((agp_find_client_in_controller(controller, id)) != NULL)
- return controller;
- controller = controller->next;
- }
-
- return NULL;
-}
-
-struct agp_client *agp_find_client_by_pid(pid_t id)
-{
- struct agp_client *temp;
-
- if (agp_fe.current_controller == NULL)
- return NULL;
-
- temp = agp_find_client_in_controller(agp_fe.current_controller, id);
- return temp;
-}
-
-static void agp_insert_client(struct agp_client *client)
-{
- struct agp_client *prev_client;
-
- prev_client = agp_fe.current_controller->clients;
- client->next = prev_client;
-
- if (prev_client != NULL)
- prev_client->prev = client;
-
- agp_fe.current_controller->clients = client;
- agp_fe.current_controller->num_clients++;
-}
-
-struct agp_client *agp_create_client(pid_t id)
-{
- struct agp_client *new_client;
-
- new_client = kzalloc(sizeof(struct agp_client), GFP_KERNEL);
- if (new_client == NULL)
- return NULL;
-
- new_client->pid = id;
- agp_insert_client(new_client);
- return new_client;
-}
-
-int agp_remove_client(pid_t id)
-{
- struct agp_client *client;
- struct agp_client *prev_client;
- struct agp_client *next_client;
- struct agp_controller *controller;
-
- controller = agp_find_controller_for_client(id);
- if (controller == NULL)
- return -EINVAL;
-
- client = agp_find_client_in_controller(controller, id);
- if (client == NULL)
- return -EINVAL;
-
- prev_client = client->prev;
- next_client = client->next;
-
- if (prev_client != NULL) {
- prev_client->next = next_client;
- if (next_client != NULL)
- next_client->prev = prev_client;
-
- } else {
- if (next_client != NULL)
- next_client->prev = NULL;
- controller->clients = next_client;
- }
-
- controller->num_clients--;
- agp_remove_seg_from_client(client);
- kfree(client);
- return 0;
-}
-
-/* End - Routines for managing client lists */
-
-/* File Operations */
-
-static int agp_mmap(struct file *file, struct vm_area_struct *vma)
-{
- unsigned int size, current_size;
- unsigned long offset;
- struct agp_client *client;
- struct agp_file_private *priv = file->private_data;
- struct agp_kern_info kerninfo;
-
- mutex_lock(&(agp_fe.agp_mutex));
-
- if (agp_fe.backend_acquired != true)
- goto out_eperm;
-
- if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags)))
- goto out_eperm;
-
- agp_copy_info(agp_bridge, &kerninfo);
- size = vma->vm_end - vma->vm_start;
- current_size = kerninfo.aper_size;
- current_size = current_size * 0x100000;
- offset = vma->vm_pgoff << PAGE_SHIFT;
- DBG("%lx:%lx", offset, offset+size);
-
- if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) {
- if ((size + offset) > current_size)
- goto out_inval;
-
- client = agp_find_client_by_pid(current->pid);
-
- if (client == NULL)
- goto out_eperm;
-
- if (!agp_find_seg_in_client(client, offset, size, vma->vm_page_prot))
- goto out_inval;
-
- DBG("client vm_ops=%p", kerninfo.vm_ops);
- if (kerninfo.vm_ops) {
- vma->vm_ops = kerninfo.vm_ops;
- } else if (io_remap_pfn_range(vma, vma->vm_start,
- (kerninfo.aper_base + offset) >> PAGE_SHIFT,
- size,
- pgprot_writecombine(vma->vm_page_prot))) {
- goto out_again;
- }
- mutex_unlock(&(agp_fe.agp_mutex));
- return 0;
- }
-
- if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
- if (size != current_size)
- goto out_inval;
-
- DBG("controller vm_ops=%p", kerninfo.vm_ops);
- if (kerninfo.vm_ops) {
- vma->vm_ops = kerninfo.vm_ops;
- } else if (io_remap_pfn_range(vma, vma->vm_start,
- kerninfo.aper_base >> PAGE_SHIFT,
- size,
- pgprot_writecombine(vma->vm_page_prot))) {
- goto out_again;
- }
- mutex_unlock(&(agp_fe.agp_mutex));
- return 0;
- }
-
-out_eperm:
- mutex_unlock(&(agp_fe.agp_mutex));
- return -EPERM;
-
-out_inval:
- mutex_unlock(&(agp_fe.agp_mutex));
- return -EINVAL;
-
-out_again:
- mutex_unlock(&(agp_fe.agp_mutex));
- return -EAGAIN;
-}
-
-static int agp_release(struct inode *inode, struct file *file)
-{
- struct agp_file_private *priv = file->private_data;
-
- mutex_lock(&(agp_fe.agp_mutex));
-
- DBG("priv=%p", priv);
-
- if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
- struct agp_controller *controller;
-
- controller = agp_find_controller_by_pid(priv->my_pid);
-
- if (controller != NULL) {
- if (controller == agp_fe.current_controller)
- agp_controller_release_current(controller, priv);
- agp_remove_controller(controller);
- controller = NULL;
- }
- }
-
- if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags))
- agp_remove_client(priv->my_pid);
-
- agp_remove_file_private(priv);
- kfree(priv);
- file->private_data = NULL;
- mutex_unlock(&(agp_fe.agp_mutex));
- return 0;
-}
-
-static int agp_open(struct inode *inode, struct file *file)
-{
- int minor = iminor(inode);
- struct agp_file_private *priv;
- struct agp_client *client;
-
- if (minor != AGPGART_MINOR)
- return -ENXIO;
-
- mutex_lock(&(agp_fe.agp_mutex));
-
- priv = kzalloc(sizeof(struct agp_file_private), GFP_KERNEL);
- if (priv == NULL) {
- mutex_unlock(&(agp_fe.agp_mutex));
- return -ENOMEM;
- }
-
- set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags);
- priv->my_pid = current->pid;
-
- if (capable(CAP_SYS_RAWIO))
- /* Root priv, can be controller */
- set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags);
-
- client = agp_find_client_by_pid(current->pid);
-
- if (client != NULL) {
- set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
- set_bit(AGP_FF_IS_VALID, &priv->access_flags);
- }
- file->private_data = (void *) priv;
- agp_insert_file_private(priv);
- DBG("private=%p, client=%p", priv, client);
-
- mutex_unlock(&(agp_fe.agp_mutex));
-
- return 0;
-}
-
-static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_info userinfo;
- struct agp_kern_info kerninfo;
-
- agp_copy_info(agp_bridge, &kerninfo);
-
- memset(&userinfo, 0, sizeof(userinfo));
- userinfo.version.major = kerninfo.version.major;
- userinfo.version.minor = kerninfo.version.minor;
- userinfo.bridge_id = kerninfo.device->vendor |
- (kerninfo.device->device << 16);
- userinfo.agp_mode = kerninfo.mode;
- userinfo.aper_base = kerninfo.aper_base;
- userinfo.aper_size = kerninfo.aper_size;
- userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory;
- userinfo.pg_used = kerninfo.current_memory;
-
- if (copy_to_user(arg, &userinfo, sizeof(struct agp_info)))
- return -EFAULT;
-
- return 0;
-}
-
-int agpioc_acquire_wrap(struct agp_file_private *priv)
-{
- struct agp_controller *controller;
-
- DBG("");
-
- if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags)))
- return -EPERM;
-
- if (agp_fe.current_controller != NULL)
- return -EBUSY;
-
- if (!agp_bridge)
- return -ENODEV;
-
- if (atomic_read(&agp_bridge->agp_in_use))
- return -EBUSY;
-
- atomic_inc(&agp_bridge->agp_in_use);
-
- agp_fe.backend_acquired = true;
-
- controller = agp_find_controller_by_pid(priv->my_pid);
-
- if (controller != NULL) {
- agp_controller_make_current(controller);
- } else {
- controller = agp_create_controller(priv->my_pid);
-
- if (controller == NULL) {
- agp_fe.backend_acquired = false;
- agp_backend_release(agp_bridge);
- return -ENOMEM;
- }
- agp_insert_controller(controller);
- agp_controller_make_current(controller);
- }
-
- set_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags);
- set_bit(AGP_FF_IS_VALID, &priv->access_flags);
- return 0;
-}
-
-int agpioc_release_wrap(struct agp_file_private *priv)
-{
- DBG("");
- agp_controller_release_current(agp_fe.current_controller, priv);
- return 0;
-}
-
-int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_setup mode;
-
- DBG("");
- if (copy_from_user(&mode, arg, sizeof(struct agp_setup)))
- return -EFAULT;
-
- agp_enable(agp_bridge, mode.agp_mode);
- return 0;
-}
-
-static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_region reserve;
- struct agp_client *client;
- struct agp_file_private *client_priv;
-
- DBG("");
- if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
- return -EFAULT;
-
- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
- return -EFAULT;
-
- client = agp_find_client_by_pid(reserve.pid);
-
- if (reserve.seg_count == 0) {
- /* remove a client */
- client_priv = agp_find_private(reserve.pid);
-
- if (client_priv != NULL) {
- set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
- set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
- }
- if (client == NULL) {
- /* client is already removed */
- return 0;
- }
- return agp_remove_client(reserve.pid);
- } else {
- struct agp_segment *segment;
-
- if (reserve.seg_count >= 16384)
- return -EINVAL;
-
- segment = kmalloc((sizeof(struct agp_segment) * reserve.seg_count),
- GFP_KERNEL);
-
- if (segment == NULL)
- return -ENOMEM;
-
- if (copy_from_user(segment, (void __user *) reserve.seg_list,
- sizeof(struct agp_segment) * reserve.seg_count)) {
- kfree(segment);
- return -EFAULT;
- }
- reserve.seg_list = segment;
-
- if (client == NULL) {
- /* Create the client and add the segment */
- client = agp_create_client(reserve.pid);
-
- if (client == NULL) {
- kfree(segment);
- return -ENOMEM;
- }
- client_priv = agp_find_private(reserve.pid);
-
- if (client_priv != NULL) {
- set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
- set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
- }
- }
- return agp_create_segment(client, &reserve);
- }
- /* Will never really happen */
- return -EINVAL;
-}
-
-int agpioc_protect_wrap(struct agp_file_private *priv)
-{
- DBG("");
- /* This function is not currently implemented */
- return -EINVAL;
-}
-
-static int agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_memory *memory;
- struct agp_allocate alloc;
-
- DBG("");
- if (copy_from_user(&alloc, arg, sizeof(struct agp_allocate)))
- return -EFAULT;
-
- if (alloc.type >= AGP_USER_TYPES)
- return -EINVAL;
-
- memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
-
- if (memory == NULL)
- return -ENOMEM;
-
- alloc.key = memory->key;
- alloc.physical = memory->physical;
-
- if (copy_to_user(arg, &alloc, sizeof(struct agp_allocate))) {
- agp_free_memory_wrap(memory);
- return -EFAULT;
- }
- return 0;
-}
-
-int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg)
-{
- struct agp_memory *memory;
-
- DBG("");
- memory = agp_find_mem_by_key(arg);
-
- if (memory == NULL)
- return -EINVAL;
-
- agp_free_memory_wrap(memory);
- return 0;
-}
-
-static int agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_bind bind_info;
- struct agp_memory *memory;
-
- DBG("");
- if (copy_from_user(&bind_info, arg, sizeof(struct agp_bind)))
- return -EFAULT;
-
- memory = agp_find_mem_by_key(bind_info.key);
-
- if (memory == NULL)
- return -EINVAL;
-
- return agp_bind_memory(memory, bind_info.pg_start);
-}
-
-static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_memory *memory;
- struct agp_unbind unbind;
-
- DBG("");
- if (copy_from_user(&unbind, arg, sizeof(struct agp_unbind)))
- return -EFAULT;
-
- memory = agp_find_mem_by_key(unbind.key);
-
- if (memory == NULL)
- return -EINVAL;
-
- return agp_unbind_memory(memory);
-}
-
-static long agp_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct agp_file_private *curr_priv = file->private_data;
- int ret_val = -ENOTTY;
-
- DBG("priv=%p, cmd=%x", curr_priv, cmd);
- mutex_lock(&(agp_fe.agp_mutex));
-
- if ((agp_fe.current_controller == NULL) &&
- (cmd != AGPIOC_ACQUIRE)) {
- ret_val = -EINVAL;
- goto ioctl_out;
- }
- if ((agp_fe.backend_acquired != true) &&
- (cmd != AGPIOC_ACQUIRE)) {
- ret_val = -EBUSY;
- goto ioctl_out;
- }
- if (cmd != AGPIOC_ACQUIRE) {
- if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) {
- ret_val = -EPERM;
- goto ioctl_out;
- }
- /* Use the original pid of the controller,
- * in case it's threaded */
-
- if (agp_fe.current_controller->pid != curr_priv->my_pid) {
- ret_val = -EBUSY;
- goto ioctl_out;
- }
- }
-
- switch (cmd) {
- case AGPIOC_INFO:
- ret_val = agpioc_info_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_ACQUIRE:
- ret_val = agpioc_acquire_wrap(curr_priv);
- break;
-
- case AGPIOC_RELEASE:
- ret_val = agpioc_release_wrap(curr_priv);
- break;
-
- case AGPIOC_SETUP:
- ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_RESERVE:
- ret_val = agpioc_reserve_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_PROTECT:
- ret_val = agpioc_protect_wrap(curr_priv);
- break;
-
- case AGPIOC_ALLOCATE:
- ret_val = agpioc_allocate_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_DEALLOCATE:
- ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg);
- break;
-
- case AGPIOC_BIND:
- ret_val = agpioc_bind_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_UNBIND:
- ret_val = agpioc_unbind_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_CHIPSET_FLUSH:
- break;
- }
-
-ioctl_out:
- DBG("ioctl returns %d\n", ret_val);
- mutex_unlock(&(agp_fe.agp_mutex));
- return ret_val;
-}
-
-static const struct file_operations agp_fops =
-{
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .unlocked_ioctl = agp_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = compat_agp_ioctl,
-#endif
- .mmap = agp_mmap,
- .open = agp_open,
- .release = agp_release,
-};
-
-static struct miscdevice agp_miscdev =
-{
- .minor = AGPGART_MINOR,
- .name = "agpgart",
- .fops = &agp_fops
-};
-
-int agp_frontend_initialize(void)
-{
- memset(&agp_fe, 0, sizeof(struct agp_front_data));
- mutex_init(&(agp_fe.agp_mutex));
-
- if (misc_register(&agp_miscdev)) {
- printk(KERN_ERR PFX "unable to get minor: %d\n", AGPGART_MINOR);
- return -EIO;
- }
- return 0;
-}
-
-void agp_frontend_cleanup(void)
-{
- misc_deregister(&agp_miscdev);
-}
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 9a1e194d5cf8..1f6186475715 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -307,11 +307,11 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
WRITE_ONCE(cpudata->highest_perf, highest_perf);
-
+ WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
-
+ WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1));
return 0;
}
@@ -329,11 +329,12 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
highest_perf = cppc_perf.highest_perf;
WRITE_ONCE(cpudata->highest_perf, highest_perf);
-
+ WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
WRITE_ONCE(cpudata->lowest_nonlinear_perf,
cppc_perf.lowest_nonlinear_perf);
WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
+ WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf);
if (cppc_state == AMD_PSTATE_ACTIVE)
return 0;
@@ -432,6 +433,10 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
u64 prev = READ_ONCE(cpudata->cppc_req_cached);
u64 value = prev;
+ min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
+ cpudata->max_limit_perf);
+ max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
+ cpudata->max_limit_perf);
des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
@@ -470,6 +475,22 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy)
return 0;
}
+static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
+{
+ u32 max_limit_perf, min_limit_perf;
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
+ min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
+
+ WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
+ WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
+ WRITE_ONCE(cpudata->max_limit_freq, policy->max);
+ WRITE_ONCE(cpudata->min_limit_freq, policy->min);
+
+ return 0;
+}
+
static int amd_pstate_update_freq(struct cpufreq_policy *policy,
unsigned int target_freq, bool fast_switch)
{
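amd_pstate_update_min_max_limit() rescales the policy's frequency limits into CPPC performance units via limit_perf = limit_freq * highest_perf / max_freq. A self-contained illustration with sample values (the numbers are illustrative, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t highest_perf = 228;				/* CPPC units */
	uint64_t max_freq = 4200000;				/* kHz */
	uint64_t policy_max = 2100000, policy_min = 400000;	/* kHz */

	/* mirrors the div_u64() calls above */
	printf("max_limit_perf = %llu\n",
	       (unsigned long long)(policy_max * highest_perf / max_freq)); /* 114 */
	printf("min_limit_perf = %llu\n",
	       (unsigned long long)(policy_min * highest_perf / max_freq)); /* 21 */
	return 0;
}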
@@ -480,6 +501,9 @@ static int amd_pstate_update_freq(struct cpufreq_policy *policy,
if (!cpudata->max_freq)
return -ENODEV;
+ if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
+ amd_pstate_update_min_max_limit(policy);
+
cap_perf = READ_ONCE(cpudata->highest_perf);
min_perf = READ_ONCE(cpudata->lowest_perf);
max_perf = cap_perf;
@@ -518,7 +542,9 @@ static int amd_pstate_target(struct cpufreq_policy *policy,
static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
- return amd_pstate_update_freq(policy, target_freq, true);
+ if (!amd_pstate_update_freq(policy, target_freq, true))
+ return target_freq;
+ return policy->cur;
}
static void amd_pstate_adjust_perf(unsigned int cpu,
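The fast_switch fix matters because cpufreq expects ->fast_switch() to return the frequency actually put in effect, not a 0/-errno status. A sketch of that contract (program_freq() is a hypothetical helper returning 0 on success):

static unsigned int my_fast_switch(struct cpufreq_policy *policy,
				   unsigned int target_freq)
{
	if (!program_freq(policy, target_freq))
		return target_freq;	/* success: the new frequency */
	return policy->cur;		/* failure: frequency is unchanged */
}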
@@ -532,6 +558,10 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
struct amd_cpudata *cpudata = policy->driver_data;
unsigned int target_freq;
+ if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
+ amd_pstate_update_min_max_limit(policy);
+
cap_perf = READ_ONCE(cpudata->highest_perf);
lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
max_freq = READ_ONCE(cpudata->max_freq);
@@ -745,6 +775,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
/* Initial processor data capability frequencies */
cpudata->max_freq = max_freq;
cpudata->min_freq = min_freq;
+ cpudata->max_limit_freq = max_freq;
+ cpudata->min_limit_freq = min_freq;
cpudata->nominal_freq = nominal_freq;
cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
@@ -850,11 +882,16 @@ static ssize_t show_energy_performance_available_preferences(
{
int i = 0;
int offset = 0;
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
+ return sysfs_emit_at(buf, offset, "%s\n",
+ energy_perf_strings[EPP_INDEX_PERFORMANCE]);
while (energy_perf_strings[i] != NULL)
offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]);
- sysfs_emit_at(buf, offset, "\n");
+ offset += sysfs_emit_at(buf, offset, "\n");
return offset;
}
@@ -1183,16 +1220,25 @@ static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
-static void amd_pstate_epp_init(unsigned int cpu)
+static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct amd_cpudata *cpudata = policy->driver_data;
- u32 max_perf, min_perf;
+ u32 max_perf, min_perf, min_limit_perf, max_limit_perf;
u64 value;
s16 epp;
max_perf = READ_ONCE(cpudata->highest_perf);
min_perf = READ_ONCE(cpudata->lowest_perf);
+ max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
+ min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
+
+ max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
+ cpudata->max_limit_perf);
+ min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
+ cpudata->max_limit_perf);
+
+ WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
+ WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
value = READ_ONCE(cpudata->cppc_req_cached);
@@ -1210,9 +1256,6 @@ static void amd_pstate_epp_init(unsigned int cpu)
value &= ~AMD_CPPC_DES_PERF(~0L);
value |= AMD_CPPC_DES_PERF(0);
- if (cpudata->epp_policy == cpudata->policy)
- goto skip_epp;
-
cpudata->epp_policy = cpudata->policy;
/* Get BIOS pre-defined epp value */
@@ -1222,7 +1265,7 @@ static void amd_pstate_epp_init(unsigned int cpu)
* This return value can only be negative for shared_memory
* systems where EPP register read/write not supported.
*/
- goto skip_epp;
+ return;
}
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
@@ -1236,8 +1279,6 @@ static void amd_pstate_epp_init(unsigned int cpu)
WRITE_ONCE(cpudata->cppc_req_cached, value);
amd_pstate_set_epp(cpudata, epp);
-skip_epp:
- cpufreq_cpu_put(policy);
}
static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
@@ -1252,7 +1293,7 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
cpudata->policy = policy->policy;
- amd_pstate_epp_init(policy->cpu);
+ amd_pstate_epp_update_limit(policy);
return 0;
}
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 494d044b9e72..33728c242f66 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -327,7 +327,7 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
imx6x_disable_freq_in_opp(dev, 696000000);
if (of_machine_is_compatible("fsl,imx6ull")) {
- if (val != OCOTP_CFG3_6ULL_SPEED_792MHZ)
+ if (val < OCOTP_CFG3_6ULL_SPEED_792MHZ)
imx6x_disable_freq_in_opp(dev, 792000000);
if (val != OCOTP_CFG3_6ULL_SPEED_900MHZ)
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 6355a39418c5..ea05d9d67490 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -23,8 +23,10 @@
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>
@@ -55,6 +57,7 @@ struct qcom_cpufreq_match_data {
struct qcom_cpufreq_drv_cpu {
int opp_token;
+ struct device **virt_devs;
};
struct qcom_cpufreq_drv {
@@ -424,6 +427,30 @@ static const struct qcom_cpufreq_match_data match_data_ipq8074 = {
.get_version = qcom_cpufreq_ipq8074_name_version,
};
+static void qcom_cpufreq_suspend_virt_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu)
+{
+ const char * const *name = drv->data->genpd_names;
+ int i;
+
+ if (!drv->cpus[cpu].virt_devs)
+ return;
+
+ for (i = 0; *name; i++, name++)
+ device_set_awake_path(drv->cpus[cpu].virt_devs[i]);
+}
+
+static void qcom_cpufreq_put_virt_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu)
+{
+ const char * const *name = drv->data->genpd_names;
+ int i;
+
+ if (!drv->cpus[cpu].virt_devs)
+ return;
+
+ for (i = 0; *name; i++, name++)
+ pm_runtime_put(drv->cpus[cpu].virt_devs[i]);
+}
+
static int qcom_cpufreq_probe(struct platform_device *pdev)
{
struct qcom_cpufreq_drv *drv;
@@ -478,6 +505,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
of_node_put(np);
for_each_possible_cpu(cpu) {
+ struct device **virt_devs = NULL;
struct dev_pm_opp_config config = {
.supported_hw = NULL,
};
@@ -498,7 +526,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
if (drv->data->genpd_names) {
config.genpd_names = drv->data->genpd_names;
- config.virt_devs = NULL;
+ config.virt_devs = &virt_devs;
}
if (config.supported_hw || config.genpd_names) {
@@ -509,6 +537,27 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
goto free_opp;
}
}
+
+ if (virt_devs) {
+ const char * const *name = config.genpd_names;
+ int i, j;
+
+ for (i = 0; *name; i++, name++) {
+ ret = pm_runtime_resume_and_get(virt_devs[i]);
+ if (ret) {
+ dev_err(cpu_dev, "failed to resume %s: %d\n",
+ *name, ret);
+
+ /* Rollback previous PM runtime calls */
+ name = config.genpd_names;
+ for (j = 0; *name && j < i; j++, name++)
+ pm_runtime_put(virt_devs[j]);
+
+ goto free_opp;
+ }
+ }
+ drv->cpus[cpu].virt_devs = virt_devs;
+ }
}
cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
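The probe hunk pairs each successful pm_runtime_resume_and_get() with a rollback of everything acquired so far when a later get fails. The same all-or-nothing shape, using an explicit count instead of the name-terminated list (a simplified sketch, not the driver's code):

static int get_all(struct device **devs, int n)
{
	int i, j, ret;

	for (i = 0; i < n; i++) {
		ret = pm_runtime_resume_and_get(devs[i]);
		if (ret) {
			/* undo the i gets that already succeeded */
			for (j = 0; j < i; j++)
				pm_runtime_put(devs[j]);
			return ret;
		}
	}
	return 0;
}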
@@ -522,8 +571,10 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
dev_err(cpu_dev, "Failed to register platform device\n");
free_opp:
- for_each_possible_cpu(cpu)
+ for_each_possible_cpu(cpu) {
+ qcom_cpufreq_put_virt_devs(drv, cpu);
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
+ }
return ret;
}
@@ -534,15 +585,31 @@ static void qcom_cpufreq_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
- for_each_possible_cpu(cpu)
+ for_each_possible_cpu(cpu) {
+ qcom_cpufreq_put_virt_devs(drv, cpu);
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
+ }
}
+static int qcom_cpufreq_suspend(struct device *dev)
+{
+ struct qcom_cpufreq_drv *drv = dev_get_drvdata(dev);
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ qcom_cpufreq_suspend_virt_devs(drv, cpu);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(qcom_cpufreq_pm_ops, qcom_cpufreq_suspend, NULL);
+
static struct platform_driver qcom_cpufreq_driver = {
.probe = qcom_cpufreq_probe,
.remove_new = qcom_cpufreq_remove,
.driver = {
.name = "qcom-cpufreq-nvmem",
+ .pm = pm_sleep_ptr(&qcom_cpufreq_pm_ops),
},
};
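The suspend hook is wired up with the stock helpers: DEFINE_SIMPLE_DEV_PM_OPS() builds the ops table and pm_sleep_ptr() drops the reference when CONFIG_PM_SLEEP is off. The same wiring in isolation (the driver name and callback are placeholders):

static int my_suspend(struct device *dev)
{
	/* e.g. mark paths that must stay powered, as the driver above does */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, NULL);

static struct platform_driver my_driver = {
	.driver = {
		.name = "my-driver",
		.pm = pm_sleep_ptr(&my_pm_ops),
	},
};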
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 8aa8f8cb7071..e0fd99e61a2d 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -934,7 +934,8 @@ EXPORT_SYMBOL(dma_fence_wait_any_timeout);
* the GPU's devfreq to reduce frequency, when in fact the opposite is what is
* needed.
*
- * To this end, deadline hint(s) can be set on a &dma_fence via &dma_fence_set_deadline.
+ * To this end, deadline hint(s) can be set on a &dma_fence via &dma_fence_set_deadline
+ * (or indirectly via userspace facing ioctls like &sync_set_deadline).
* The deadline hint provides a way for the waiting driver, or userspace, to
* convey an appropriate sense of urgency to the signaling driver.
*
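In practice a waiter sets the hint just before blocking, for example keyed to the next vblank. A minimal sketch of such a wait path (the caller and the timeout value are assumptions, not taken from this patch):

static long wait_until_vblank(struct dma_fence *fence, ktime_t vblank)
{
	/* harmless no-op if the signaler doesn't implement set_deadline */
	dma_fence_set_deadline(fence, vblank);
	return dma_fence_wait_timeout(fence, true, msecs_to_jiffies(50));
}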
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 38b4110378de..eb8b733065b2 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -301,7 +301,7 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
if ((old->context == fence->context && old_usage >= usage &&
- dma_fence_is_later(fence, old)) ||
+ dma_fence_is_later_or_same(fence, old)) ||
dma_fence_is_signaled(old)) {
dma_resv_list_set(fobj, i, fence, usage);
dma_fence_put(old);
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index f0a35277fd84..c353029789cf 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -52,12 +52,33 @@ struct sw_sync_create_fence_data {
__s32 fence; /* fd of new fence */
};
+/**
+ * struct sw_sync_get_deadline - get the deadline hint of a sw_sync fence
+ * @deadline_ns: absolute time of the deadline (out; must be zero on entry)
+ * @pad: must be zero
+ * @fence_fd: the sw_sync fence fd (in)
+ *
+ * Return the earliest deadline set on the fence. The timebase for the
+ * deadline is CLOCK_MONOTONIC (same as vblank). If there is no deadline
+ * set on the fence, this ioctl will return -ENOENT.
+ */
+struct sw_sync_get_deadline {
+ __u64 deadline_ns;
+ __u32 pad;
+ __s32 fence_fd;
+};
+
#define SW_SYNC_IOC_MAGIC 'W'
#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
struct sw_sync_create_fence_data)
#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+#define SW_SYNC_GET_DEADLINE _IOWR(SW_SYNC_IOC_MAGIC, 2, \
+ struct sw_sync_get_deadline)
+
+#define SW_SYNC_HAS_DEADLINE_BIT DMA_FENCE_FLAG_USER_BITS
static const struct dma_fence_ops timeline_fence_ops;
@@ -171,6 +192,22 @@ static void timeline_fence_timeline_value_str(struct dma_fence *fence,
snprintf(str, size, "%d", parent->value);
}
+static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
+{
+ struct sync_pt *pt = dma_fence_to_sync_pt(fence);
+ unsigned long flags;
+
+ spin_lock_irqsave(fence->lock, flags);
+ if (test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
+ if (ktime_before(deadline, pt->deadline))
+ pt->deadline = deadline;
+ } else {
+ pt->deadline = deadline;
+ __set_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags);
+ }
+ spin_unlock_irqrestore(fence->lock, flags);
+}
+
static const struct dma_fence_ops timeline_fence_ops = {
.get_driver_name = timeline_fence_get_driver_name,
.get_timeline_name = timeline_fence_get_timeline_name,
@@ -179,6 +216,7 @@ static const struct dma_fence_ops timeline_fence_ops = {
.release = timeline_fence_release,
.fence_value_str = timeline_fence_value_str,
.timeline_value_str = timeline_fence_timeline_value_str,
+ .set_deadline = timeline_fence_set_deadline,
};
/**
@@ -387,6 +425,47 @@ static long sw_sync_ioctl_inc(struct sync_timeline *obj, unsigned long arg)
return 0;
}
+static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long arg)
+{
+ struct sw_sync_get_deadline data;
+ struct dma_fence *fence;
+ unsigned long flags;
+ struct sync_pt *pt;
+ int ret = 0;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+ return -EFAULT;
+
+ if (data.deadline_ns || data.pad)
+ return -EINVAL;
+
+ fence = sync_file_get_fence(data.fence_fd);
+ if (!fence)
+ return -EINVAL;
+
+ pt = dma_fence_to_sync_pt(fence);
+ if (!pt)
+ return -EINVAL;
+
+ spin_lock_irqsave(fence->lock, flags);
+ if (test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
+ data.deadline_ns = ktime_to_ns(pt->deadline);
+ } else {
+ ret = -ENOENT;
+ }
+ spin_unlock_irqrestore(fence->lock, flags);
+
+ dma_fence_put(fence);
+
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+ return -EFAULT;
+
+ return 0;
+}
+
static long sw_sync_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -399,6 +478,9 @@ static long sw_sync_ioctl(struct file *file, unsigned int cmd,
case SW_SYNC_IOC_INC:
return sw_sync_ioctl_inc(obj, arg);
+ case SW_SYNC_GET_DEADLINE:
+ return sw_sync_ioctl_get_deadline(obj, arg);
+
default:
return -ENOTTY;
}
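A userspace test of the new ioctl might look as follows; the structs mirror the driver-local definitions above, the debugfs path is the usual sw_sync location, and the whole thing assumes CONFIG_SW_SYNC is enabled:

#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/types.h>

struct sw_sync_create_fence_data {
	__u32 value;
	char name[32];
	__s32 fence;		/* fd of new fence (out) */
};

struct sw_sync_get_deadline {
	__u64 deadline_ns;	/* out; must be zero on entry */
	__u32 pad;		/* must be zero */
	__s32 fence_fd;		/* in */
};

#define SW_SYNC_IOC_MAGIC	'W'
#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0, \
		struct sw_sync_create_fence_data)
#define SW_SYNC_GET_DEADLINE	_IOWR(SW_SYNC_IOC_MAGIC, 2, \
		struct sw_sync_get_deadline)

int main(void)
{
	struct sw_sync_create_fence_data cf = { .value = 1 };
	struct sw_sync_get_deadline gd = { 0 };
	int tl = open("/sys/kernel/debug/sync/sw_sync", O_RDWR);

	if (tl < 0 || ioctl(tl, SW_SYNC_IOC_CREATE_FENCE, &cf))
		return 1;
	gd.fence_fd = cf.fence;
	/* fails with ENOENT until a deadline has been set on the fence */
	if (ioctl(tl, SW_SYNC_GET_DEADLINE, &gd) == 0)
		printf("deadline: %llu ns\n", (unsigned long long)gd.deadline_ns);
	return 0;
}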
diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
index 6176e52ba2d7..a1bdd62efccd 100644
--- a/drivers/dma-buf/sync_debug.h
+++ b/drivers/dma-buf/sync_debug.h
@@ -55,11 +55,13 @@ static inline struct sync_timeline *dma_fence_parent(struct dma_fence *fence)
* @base: base fence object
* @link: link on the sync timeline's list
* @node: node in the sync timeline's tree
+ * @deadline: the earliest fence deadline hint
*/
struct sync_pt {
struct dma_fence base;
struct list_head link;
struct rb_node node;
+ ktime_t deadline;
};
extern const struct file_operations sw_sync_debugfs_fops;
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 2e9a316c596a..d9b1c1b2a72b 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -347,6 +347,22 @@ out:
return ret;
}
+static int sync_file_ioctl_set_deadline(struct sync_file *sync_file,
+ unsigned long arg)
+{
+ struct sync_set_deadline ts;
+
+ if (copy_from_user(&ts, (void __user *)arg, sizeof(ts)))
+ return -EFAULT;
+
+ if (ts.pad)
+ return -EINVAL;
+
+ dma_fence_set_deadline(sync_file->fence, ns_to_ktime(ts.deadline_ns));
+
+ return 0;
+}
+
static long sync_file_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -359,6 +375,9 @@ static long sync_file_ioctl(struct file *file, unsigned int cmd,
case SYNC_IOC_FILE_INFO:
return sync_file_ioctl_fence_info(sync_file, arg);
+ case SYNC_IOC_SET_DEADLINE:
+ return sync_file_ioctl_set_deadline(sync_file, arg);
+
default:
return -ENOTTY;
}
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index aa597cda0d88..2828e9573e90 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -717,14 +717,11 @@ static void create_units(struct fw_device *device)
fw_unit_attributes,
&unit->attribute_group);
- if (device_register(&unit->device) < 0)
- goto skip_unit;
-
fw_device_get(device);
- continue;
-
- skip_unit:
- kfree(unit);
+ if (device_register(&unit->device) < 0) {
+ put_device(&unit->device);
+ continue;
+ }
}
}
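The firewire fix follows the driver-core rule: once device_register() has run (it initializes the kobject), a failed device must be released with put_device() so its release() callback frees it; a bare kfree() bypasses the refcount. In general form (my_unit is a hypothetical wrapper):

struct my_unit {
	struct device dev;	/* freed by dev's release() callback */
};

static int add_unit(struct my_unit *unit)
{
	if (device_register(&unit->dev) < 0) {
		put_device(&unit->dev);	/* drops the last ref; never kfree() here */
		return -ENODEV;
	}
	return 0;
}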
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 7edf2c95282f..e779d866022b 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1519,9 +1519,9 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
sdev->use_10_for_rw = 1;
if (sbp2_param_exclusive_login) {
- sdev->manage_system_start_stop = true;
- sdev->manage_runtime_start_stop = true;
- sdev->manage_shutdown = true;
+ sdev->manage_system_start_stop = 1;
+ sdev->manage_runtime_start_stop = 1;
+ sdev->manage_shutdown = 1;
}
if (sdev->type == TYPE_ROM)
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 07b72c679247..6146b2927d5c 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -99,6 +99,7 @@ struct ffa_drv_info {
void *tx_buffer;
bool mem_ops_native;
bool bitmap_created;
+ bool notif_enabled;
unsigned int sched_recv_irq;
unsigned int cpuhp_state;
struct ffa_pcpu_irq __percpu *irq_pcpu;
@@ -782,7 +783,7 @@ static void ffa_notification_info_get(void)
if (ids_processed >= max_ids - 1)
break;
- part_id = packed_id_list[++ids_processed];
+ part_id = packed_id_list[ids_processed++];
if (!ids_count[list]) { /* Global Notification */
__do_sched_recv_cb(part_id, 0, false);
@@ -794,7 +795,7 @@ static void ffa_notification_info_get(void)
if (ids_processed >= max_ids - 1)
break;
- vcpu_id = packed_id_list[++ids_processed];
+ vcpu_id = packed_id_list[ids_processed++];
__do_sched_recv_cb(part_id, vcpu_id, true);
}
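Both one-character fixes replace a pre-increment with a post-increment: ++i advances the index before the read, so the old code skipped the entry it was standing on and consumed its neighbour instead. A self-contained demonstration:

#include <stdio.h>

int main(void)
{
	int list[] = { 10, 20, 30 };
	int i = 0, j = 0;

	int post = list[i++];	/* reads list[0] (10), then advances */
	int pre  = list[++j];	/* advances first: list[0] is never read */

	printf("post-increment read %d, index now %d\n", post, i);	/* 10, 1 */
	printf("pre-increment read %d, index now %d\n", pre, j);	/* 20, 1 */
	return 0;
}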
@@ -889,6 +890,8 @@ static int ffa_memory_lend(struct ffa_mem_ops_args *args)
#define FFA_SECURE_PARTITION_ID_FLAG BIT(15)
+#define ffa_notifications_disabled() (!drv_info->notif_enabled)
+
enum notify_type {
NON_SECURE_VM,
SECURE_PARTITION,
@@ -908,6 +911,9 @@ static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
struct ffa_dev_part_info *partition;
bool cb_valid;
+ if (ffa_notifications_disabled())
+ return -EOPNOTSUPP;
+
partition = xa_load(&drv_info->partition_info, part_id);
write_lock(&partition->rw_lock);
@@ -1001,6 +1007,9 @@ static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
int rc;
enum notify_type type = ffa_notify_type_get(dev->vm_id);
+ if (ffa_notifications_disabled())
+ return -EOPNOTSUPP;
+
if (notify_id >= FFA_MAX_NOTIFICATIONS)
return -EINVAL;
@@ -1027,6 +1036,9 @@ static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
u32 flags = 0;
enum notify_type type = ffa_notify_type_get(dev->vm_id);
+ if (ffa_notifications_disabled())
+ return -EOPNOTSUPP;
+
if (notify_id >= FFA_MAX_NOTIFICATIONS)
return -EINVAL;
@@ -1057,6 +1069,9 @@ static int ffa_notify_send(struct ffa_device *dev, int notify_id,
{
u32 flags = 0;
+ if (ffa_notifications_disabled())
+ return -EOPNOTSUPP;
+
if (is_per_vcpu)
flags |= (PER_VCPU_NOTIFICATION_FLAG | vcpu << 16);
@@ -1233,7 +1248,7 @@ static void ffa_partitions_cleanup(void)
if (!count)
return;
- info = kcalloc(count, sizeof(**info), GFP_KERNEL);
+ info = kcalloc(count, sizeof(*info), GFP_KERNEL);
if (!info)
return;
@@ -1311,8 +1326,10 @@ static int ffa_sched_recv_irq_map(void)
static void ffa_sched_recv_irq_unmap(void)
{
- if (drv_info->sched_recv_irq)
+ if (drv_info->sched_recv_irq) {
irq_dispose_mapping(drv_info->sched_recv_irq);
+ drv_info->sched_recv_irq = 0;
+ }
}
static int ffa_cpuhp_pcpu_irq_enable(unsigned int cpu)
@@ -1329,17 +1346,23 @@ static int ffa_cpuhp_pcpu_irq_disable(unsigned int cpu)
static void ffa_uninit_pcpu_irq(void)
{
- if (drv_info->cpuhp_state)
+ if (drv_info->cpuhp_state) {
cpuhp_remove_state(drv_info->cpuhp_state);
+ drv_info->cpuhp_state = 0;
+ }
- if (drv_info->notif_pcpu_wq)
+ if (drv_info->notif_pcpu_wq) {
destroy_workqueue(drv_info->notif_pcpu_wq);
+ drv_info->notif_pcpu_wq = NULL;
+ }
if (drv_info->sched_recv_irq)
free_percpu_irq(drv_info->sched_recv_irq, drv_info->irq_pcpu);
- if (drv_info->irq_pcpu)
+ if (drv_info->irq_pcpu) {
free_percpu(drv_info->irq_pcpu);
+ drv_info->irq_pcpu = NULL;
+ }
}
static int ffa_init_pcpu_irq(unsigned int irq)
@@ -1388,22 +1411,23 @@ static void ffa_notifications_cleanup(void)
ffa_notification_bitmap_destroy();
drv_info->bitmap_created = false;
}
+ drv_info->notif_enabled = false;
}
-static int ffa_notifications_setup(void)
+static void ffa_notifications_setup(void)
{
int ret, irq;
ret = ffa_features(FFA_NOTIFICATION_BITMAP_CREATE, 0, NULL, NULL);
if (ret) {
- pr_err("Notifications not supported, continuing with it ..\n");
- return 0;
+ pr_info("Notifications not supported, continuing with it ..\n");
+ return;
}
ret = ffa_notification_bitmap_create();
if (ret) {
- pr_err("notification_bitmap_create error %d\n", ret);
- return ret;
+ pr_info("Notification bitmap create error %d\n", ret);
+ return;
}
drv_info->bitmap_created = true;
@@ -1422,14 +1446,11 @@ static int ffa_notifications_setup(void)
hash_init(drv_info->notifier_hash);
mutex_init(&drv_info->notify_lock);
- /* Register internal scheduling callback */
- ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle,
- drv_info, true);
- if (!ret)
- return ret;
+ drv_info->notif_enabled = true;
+ return;
cleanup:
+ pr_info("Notification setup failed %d, not enabled\n", ret);
ffa_notifications_cleanup();
- return ret;
}
static int __init ffa_init(void)
@@ -1483,17 +1504,18 @@ static int __init ffa_init(void)
mutex_init(&drv_info->rx_lock);
mutex_init(&drv_info->tx_lock);
- ffa_setup_partitions();
-
ffa_set_up_mem_ops_native_flag();
- ret = ffa_notifications_setup();
+ ffa_notifications_setup();
+
+ ffa_setup_partitions();
+
+ ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle,
+ drv_info, true);
if (ret)
- goto partitions_cleanup;
+ pr_info("Failed to register driver sched callback %d\n", ret);
return 0;
-partitions_cleanup:
- ffa_partitions_cleanup();
free_pages:
if (drv_info->tx_buffer)
free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
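
With this rework, notification support is probed once and recorded in notif_enabled; every notifier op bails out early with -EOPNOTSUPP instead of the whole driver failing to probe. An FF-A client would treat that error as "feature absent" rather than fatal; a hedged sketch, assuming the notifier_ops layout used by this driver and hypothetical callback names:

    ret = ffa_dev->ops->notifier_ops->notify_request(ffa_dev, false,
                                                     my_notifier_cb,
                                                     my_data, notify_id);
    if (ret == -EOPNOTSUPP) {
            /* Firmware offers no FF-A notifications: fall back to polling. */
            schedule_delayed_work(&my_poll_work, msecs_to_jiffies(100));
    } else if (ret) {
            return ret;
    }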
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index c2435be0ae1b..e11555de99ab 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -152,7 +152,7 @@ struct perf_dom_info {
u32 opp_count;
u32 sustained_freq_khz;
u32 sustained_perf_level;
- u32 mult_factor;
+ unsigned long mult_factor;
struct scmi_perf_domain_info info;
struct scmi_opp opp[MAX_OPPS];
struct scmi_fc_info *fc_info;
@@ -268,13 +268,14 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
dom_info->sustained_perf_level =
le32_to_cpu(attr->sustained_perf_level);
if (!dom_info->sustained_freq_khz ||
- !dom_info->sustained_perf_level)
+ !dom_info->sustained_perf_level ||
+ dom_info->level_indexing_mode)
/* CPUFreq converts to kHz, hence default 1000 */
dom_info->mult_factor = 1000;
else
dom_info->mult_factor =
- (dom_info->sustained_freq_khz * 1000) /
- dom_info->sustained_perf_level;
+ (dom_info->sustained_freq_khz * 1000UL)
+ / dom_info->sustained_perf_level;
strscpy(dom_info->info.name, attr->name,
SCMI_SHORT_NAME_MAX_SIZE);
}
@@ -798,7 +799,7 @@ static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
if (!dom->level_indexing_mode)
freq = dom->opp[idx].perf * dom->mult_factor;
else
- freq = dom->opp[idx].indicative_freq * 1000;
+ freq = dom->opp[idx].indicative_freq * dom->mult_factor;
data.level = dom->opp[idx].perf;
data.freq = freq;
@@ -845,7 +846,8 @@ static int scmi_dvfs_freq_set(const struct scmi_protocol_handle *ph, u32 domain,
} else {
struct scmi_opp *opp;
- opp = LOOKUP_BY_FREQ(dom->opps_by_freq, freq / 1000);
+ opp = LOOKUP_BY_FREQ(dom->opps_by_freq,
+ freq / dom->mult_factor);
if (!opp)
return -EIO;
@@ -879,7 +881,7 @@ static int scmi_dvfs_freq_get(const struct scmi_protocol_handle *ph, u32 domain,
if (!opp)
return -EIO;
- *freq = opp->indicative_freq * 1000;
+ *freq = opp->indicative_freq * dom->mult_factor;
}
return ret;
@@ -902,7 +904,7 @@ static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle *ph,
if (!dom->level_indexing_mode)
opp_freq = opp->perf * dom->mult_factor;
else
- opp_freq = opp->indicative_freq * 1000;
+ opp_freq = opp->indicative_freq * dom->mult_factor;
if (opp_freq < *freq)
continue;
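
After this change mult_factor uniformly converts a stored per-OPP value to Hz: in level-indexing mode it is pinned to 1000 (indicative_freq is in kHz), otherwise it is Hz per performance-level unit, computed in unsigned long so the kHz * 1000 product cannot wrap a u32. A worked example with assumed firmware values:

    /* Assumed domain description from firmware: */
    sustained_freq_khz   = 2800000;                 /* 2.8 GHz */
    sustained_perf_level = 280;

    mult_factor = (2800000 * 1000UL) / 280;         /* 10000000 Hz per level */
    freq = 140 * mult_factor;                       /* level 140 -> 1.4 GHz */

    /* Rationale for unsigned long: any sustained_freq_khz above
     * 4294967 (~4.29 GHz) overflows a u32 when multiplied by 1000,
     * e.g. 5000000 kHz * 1000 = 5e9 > U32_MAX. */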
diff --git a/drivers/firmware/efi/unaccepted_memory.c b/drivers/firmware/efi/unaccepted_memory.c
index 3f2f7bf6e335..5b439d04079c 100644
--- a/drivers/firmware/efi/unaccepted_memory.c
+++ b/drivers/firmware/efi/unaccepted_memory.c
@@ -101,7 +101,7 @@ retry:
* overlap on physical address level.
*/
list_for_each_entry(entry, &accepting_list, list) {
- if (entry->end < range.start)
+ if (entry->end <= range.start)
continue;
if (entry->start >= range.end)
continue;
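
The ranges here are half-open intervals [start, end), so an entry whose end equals the incoming start is adjacent, not overlapping; the previous `<` comparison made such neighbours look like a conflict and forced a needless retry. The predicate the loop implements, as a standalone sketch:

    /* Half-open intervals [a_start, a_end) and [b_start, b_end)
     * overlap iff each starts before the other ends. */
    static bool ranges_overlap(u64 a_start, u64 a_end,
                               u64 b_start, u64 b_end)
    {
            return a_start < b_end && b_start < a_end;
    }

The two `continue` tests are exactly the negation: skip entries that end at or before range.start, or start at or after range.end.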
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 6f309a3b2d9a..12d853845bb8 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -474,14 +474,17 @@ static ssize_t export_store(const struct class *class,
goto done;
status = gpiod_set_transitory(desc, false);
- if (!status) {
- status = gpiod_export(desc, true);
- if (status < 0)
- gpiod_free(desc);
- else
- set_bit(FLAG_SYSFS, &desc->flags);
+ if (status) {
+ gpiod_free(desc);
+ goto done;
}
+ status = gpiod_export(desc, true);
+ if (status < 0)
+ gpiod_free(desc);
+ else
+ set_bit(FLAG_SYSFS, &desc->flags);
+
done:
if (status)
pr_debug("%s: status %d\n", __func__, status);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 740c1c0bd068..31cfe2c2a2af 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -74,12 +74,13 @@ config DRM_KUNIT_TEST_HELPERS
config DRM_KUNIT_TEST
tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
- depends on DRM && KUNIT
+ depends on DRM && KUNIT && MMU
select DRM_BUDDY
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
select DRM_EXEC
select DRM_EXPORT_FOR_TESTS if m
+ select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
select DRM_KUNIT_TEST_HELPERS
select DRM_LIB_RANDOM
@@ -409,27 +410,6 @@ config DRM_HYPERV
If M is selected the module will be called hyperv_drm.
-# Keep legacy drivers last
-
-menuconfig DRM_LEGACY
- bool "Enable legacy drivers (DANGEROUS)"
- depends on DRM && MMU
- help
- Enable legacy DRI1 drivers. Those drivers expose unsafe and dangerous
- APIs to user-space, which can be used to circumvent access
- restrictions and other security measures. For backwards compatibility
- those drivers are still available, but their use is highly
- inadvisable and might harm your system.
-
- You are recommended to use the safe modeset-only drivers instead, and
- perform 3D emulation in user-space.
-
- Unless you have strong reasons to go rogue, say "N".
-
-if DRM_LEGACY
-# leave here to list legacy drivers
-endif # DRM_LEGACY
-
config DRM_EXPORT_FOR_TESTS
bool
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index b4cb0835620a..8ac6f4b9546e 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -47,18 +47,6 @@ drm-y := \
drm_vblank_work.o \
drm_vma_manager.o \
drm_writeback.o
-drm-$(CONFIG_DRM_LEGACY) += \
- drm_agpsupport.o \
- drm_bufs.o \
- drm_context.o \
- drm_dma.o \
- drm_hashtab.o \
- drm_irq.o \
- drm_legacy_misc.o \
- drm_lock.o \
- drm_memory.o \
- drm_scatter.o \
- drm_vm.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index e2ae9ba147ba..5cb33ac99f70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -73,10 +73,10 @@ amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
return DRM_SCHED_PRIORITY_NORMAL;
case AMDGPU_CTX_PRIORITY_VERY_LOW:
- return DRM_SCHED_PRIORITY_MIN;
+ return DRM_SCHED_PRIORITY_LOW;
case AMDGPU_CTX_PRIORITY_LOW:
- return DRM_SCHED_PRIORITY_MIN;
+ return DRM_SCHED_PRIORITY_LOW;
case AMDGPU_CTX_PRIORITY_NORMAL:
return DRM_SCHED_PRIORITY_NORMAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 62bb7fc7448a..71a5cf37b472 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -325,7 +325,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
int i;
/* Signal all jobs not yet scheduled */
- for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
struct drm_sched_rq *rq = sched->sched_rq[i];
spin_lock(&rq->lock);
list_for_each_entry(s_entity, &rq->entities, list) {
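
Both amdgpu hunks track the DRM scheduler's reversed priority numbering, where the lowest index is the most important run-queue, so the loop now walks upward from DRM_SCHED_PRIORITY_KERNEL instead of downward from the removed DRM_SCHED_PRIORITY_MIN. The ordering being assumed, roughly as declared in include/drm/gpu_scheduler.h:

    enum drm_sched_priority {
            DRM_SCHED_PRIORITY_KERNEL,      /* 0: serviced first */
            DRM_SCHED_PRIORITY_HIGH,
            DRM_SCHED_PRIORITY_NORMAL,
            DRM_SCHED_PRIORITY_LOW,         /* serviced last */

            DRM_SCHED_PRIORITY_COUNT
    };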
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 90868a54d4dd..8e6fe9860cd6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -93,7 +93,6 @@
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include <acpi/video.h>
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 52d2c942d3d2..c78687c755a8 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -7,8 +7,9 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -1012,26 +1013,17 @@ armada_lcd_bind(struct device *dev, struct device *master, void *data)
int irq = platform_get_irq(pdev, 0);
const struct armada_variant *variant;
struct device_node *port = NULL;
+ struct device_node *np, *parent = dev->of_node;
if (irq < 0)
return irq;
- if (!dev->of_node) {
- const struct platform_device_id *id;
- id = platform_get_device_id(pdev);
- if (!id)
- return -ENXIO;
-
- variant = (const struct armada_variant *)id->driver_data;
- } else {
- const struct of_device_id *match;
- struct device_node *np, *parent = dev->of_node;
-
- match = of_match_device(dev->driver->of_match_table, dev);
- if (!match)
- return -ENXIO;
+ variant = device_get_match_data(dev);
+ if (!variant)
+ return -ENXIO;
+ if (parent) {
np = of_get_child_by_name(parent, "ports");
if (np)
parent = np;
@@ -1041,8 +1033,6 @@ armada_lcd_bind(struct device *dev, struct device *master, void *data)
dev_err(dev, "no port node found in %pOF\n", parent);
return -ENXIO;
}
-
- variant = match->data;
}
return armada_drm_crtc_create(drm, dev, res, irq, variant, port);
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index 78122b35a0cb..a7a6b70220eb 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -6,10 +6,10 @@
#include <linux/irq.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -143,7 +143,6 @@ static int aspeed_gfx_load(struct drm_device *drm)
struct aspeed_gfx *priv = to_aspeed_gfx(drm);
struct device_node *np = pdev->dev.of_node;
const struct aspeed_gfx_config *config;
- const struct of_device_id *match;
struct resource *res;
int ret;
@@ -152,10 +151,9 @@ static int aspeed_gfx_load(struct drm_device *drm)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- match = of_match_device(aspeed_gfx_match, &pdev->dev);
- if (!match)
+ config = device_get_match_data(&pdev->dev);
+ if (!config)
return -EINVAL;
- config = match->data;
priv->dac_reg = config->dac_reg;
priv->int_clr_reg = config->int_clear_reg;
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index cf5b754f044c..90bcb1eb9cd9 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -89,11 +89,194 @@ static const struct pci_device_id ast_pciidlist[] = {
MODULE_DEVICE_TABLE(pci, ast_pciidlist);
+static bool ast_is_vga_enabled(void __iomem *ioregs)
+{
+ u8 vgaer = __ast_read8(ioregs, AST_IO_VGAER);
+
+ return vgaer & AST_IO_VGAER_VGA_ENABLE;
+}
+
+static void ast_enable_vga(void __iomem *ioregs)
+{
+ __ast_write8(ioregs, AST_IO_VGAER, AST_IO_VGAER_VGA_ENABLE);
+ __ast_write8(ioregs, AST_IO_VGAMR_W, AST_IO_VGAMR_IOSEL);
+}
+
+/*
+ * Run this function as part of the HW device cleanup; not
+ * when the DRM device gets released.
+ */
+static void ast_enable_mmio_release(void *data)
+{
+ void __iomem *ioregs = (void __force __iomem *)data;
+
+ /* enable standard VGA decode */
+ __ast_write8_i(ioregs, AST_IO_VGACRI, 0xa1, AST_IO_VGACRA1_MMIO_ENABLED);
+}
+
+static int ast_enable_mmio(struct device *dev, void __iomem *ioregs)
+{
+ void *data = (void __force *)ioregs;
+
+ __ast_write8_i(ioregs, AST_IO_VGACRI, 0xa1,
+ AST_IO_VGACRA1_MMIO_ENABLED |
+ AST_IO_VGACRA1_VGAIO_DISABLED);
+
+ return devm_add_action_or_reset(dev, ast_enable_mmio_release, data);
+}
+
+static void ast_open_key(void __iomem *ioregs)
+{
+ __ast_write8_i(ioregs, AST_IO_VGACRI, 0x80, AST_IO_VGACR80_PASSWORD);
+}
+
+static int ast_detect_chip(struct pci_dev *pdev,
+ void __iomem *regs, void __iomem *ioregs,
+ enum ast_chip *chip_out,
+ enum ast_config_mode *config_mode_out)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ enum ast_config_mode config_mode = ast_use_defaults;
+ uint32_t scu_rev = 0xffffffff;
+ enum ast_chip chip;
+ u32 data;
+ u8 vgacrd0, vgacrd1;
+
+ /*
+ * Find configuration mode and read SCU revision
+ */
+
+ /* Check if we have device-tree properties */
+ if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", &data)) {
+ /* We do, disable P2A access */
+ config_mode = ast_use_dt;
+ scu_rev = data;
+ } else if (pdev->device == PCI_CHIP_AST2000) { // Not all families have a P2A bridge
+ /*
+ * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge
+ * is disabled. We force using P2A if VGA only mode bit
+ * is set D[7]
+ */
+ vgacrd0 = __ast_read8_i(ioregs, AST_IO_VGACRI, 0xd0);
+ vgacrd1 = __ast_read8_i(ioregs, AST_IO_VGACRI, 0xd1);
+ if (!(vgacrd0 & 0x80) || !(vgacrd1 & 0x10)) {
+
+ /*
+ * We have a P2A bridge and it is enabled.
+ */
+
+ /* Patch AST2500/AST2510 */
+ if ((pdev->revision & 0xf0) == 0x40) {
+ if (!(vgacrd0 & AST_VRAM_INIT_STATUS_MASK))
+ ast_patch_ahb_2500(regs);
+ }
+
+ /* Double check that it's actually working */
+ data = __ast_read32(regs, 0xf004);
+ if ((data != 0xffffffff) && (data != 0x00)) {
+ config_mode = ast_use_p2a;
+
+ /* Read SCU7c (silicon revision register) */
+ __ast_write32(regs, 0xf004, 0x1e6e0000);
+ __ast_write32(regs, 0xf000, 0x1);
+ scu_rev = __ast_read32(regs, 0x1207c);
+ }
+ }
+ }
+
+ switch (config_mode) {
+ case ast_use_defaults:
+ dev_info(dev, "Using default configuration\n");
+ break;
+ case ast_use_dt:
+ dev_info(dev, "Using device-tree for configuration\n");
+ break;
+ case ast_use_p2a:
+ dev_info(dev, "Using P2A bridge for configuration\n");
+ break;
+ }
+
+ /*
+ * Identify chipset
+ */
+
+ if (pdev->revision >= 0x50) {
+ chip = AST2600;
+ dev_info(dev, "AST 2600 detected\n");
+ } else if (pdev->revision >= 0x40) {
+ switch (scu_rev & 0x300) {
+ case 0x0100:
+ chip = AST2510;
+ dev_info(dev, "AST 2510 detected\n");
+ break;
+ default:
+ chip = AST2500;
+ dev_info(dev, "AST 2500 detected\n");
+ break;
+ }
+ } else if (pdev->revision >= 0x30) {
+ switch (scu_rev & 0x300) {
+ case 0x0100:
+ chip = AST1400;
+ dev_info(dev, "AST 1400 detected\n");
+ break;
+ default:
+ chip = AST2400;
+ dev_info(dev, "AST 2400 detected\n");
+ break;
+ }
+ } else if (pdev->revision >= 0x20) {
+ switch (scu_rev & 0x300) {
+ case 0x0000:
+ chip = AST1300;
+ dev_info(dev, "AST 1300 detected\n");
+ break;
+ default:
+ chip = AST2300;
+ dev_info(dev, "AST 2300 detected\n");
+ break;
+ }
+ } else if (pdev->revision >= 0x10) {
+ switch (scu_rev & 0x0300) {
+ case 0x0200:
+ chip = AST1100;
+ dev_info(dev, "AST 1100 detected\n");
+ break;
+ case 0x0100:
+ chip = AST2200;
+ dev_info(dev, "AST 2200 detected\n");
+ break;
+ case 0x0000:
+ chip = AST2150;
+ dev_info(dev, "AST 2150 detected\n");
+ break;
+ default:
+ chip = AST2100;
+ dev_info(dev, "AST 2100 detected\n");
+ break;
+ }
+ } else {
+ chip = AST2000;
+ dev_info(dev, "AST 2000 detected\n");
+ }
+
+ *chip_out = chip;
+ *config_mode_out = config_mode;
+
+ return 0;
+}
+
static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct ast_device *ast;
- struct drm_device *dev;
+ struct device *dev = &pdev->dev;
int ret;
+ void __iomem *regs;
+ void __iomem *ioregs;
+ enum ast_config_mode config_mode;
+ enum ast_chip chip;
+ struct drm_device *drm;
+ bool need_post = false;
ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &ast_driver);
if (ret)
@@ -103,16 +286,80 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
- ast = ast_device_create(&ast_driver, pdev, ent->driver_data);
- if (IS_ERR(ast))
- return PTR_ERR(ast);
- dev = &ast->base;
+ regs = pcim_iomap(pdev, 1, 0);
+ if (!regs)
+ return -EIO;
+
+ if (pdev->revision >= 0x40) {
+ /*
+ * On AST2500 and later models, MMIO is enabled by
+ * default. Adopt it to be compatible with ARM.
+ */
+ resource_size_t len = pci_resource_len(pdev, 1);
+
+ if (len < AST_IO_MM_OFFSET)
+ return -EIO;
+ if ((len - AST_IO_MM_OFFSET) < AST_IO_MM_LENGTH)
+ return -EIO;
+ ioregs = regs + AST_IO_MM_OFFSET;
+ } else if (pci_resource_flags(pdev, 2) & IORESOURCE_IO) {
+ /*
+ * Map I/O registers if we have a PCI BAR for I/O.
+ */
+ resource_size_t len = pci_resource_len(pdev, 2);
+
+ if (len < AST_IO_MM_LENGTH)
+ return -EIO;
+ ioregs = pcim_iomap(pdev, 2, 0);
+ if (!ioregs)
+ return -EIO;
+ } else {
+ /*
+ * Anything else is best effort.
+ */
+ resource_size_t len = pci_resource_len(pdev, 1);
+
+ if (len < AST_IO_MM_OFFSET)
+ return -EIO;
+ if ((len - AST_IO_MM_OFFSET) < AST_IO_MM_LENGTH)
+ return -EIO;
+ ioregs = regs + AST_IO_MM_OFFSET;
+
+ dev_info(dev, "Platform has no I/O space, using MMIO\n");
+ }
+
+ if (!ast_is_vga_enabled(ioregs)) {
+ dev_info(dev, "VGA not enabled on entry, requesting chip POST\n");
+ need_post = true;
+ }
+
+ /*
+ * If VGA isn't enabled, we need to enable it now or subsequent
+ * access to the scratch registers will fail.
+ */
+ if (need_post)
+ ast_enable_vga(ioregs);
+ /* Enable extended register access */
+ ast_open_key(ioregs);
+
+ ret = ast_enable_mmio(dev, ioregs);
+ if (ret)
+ return ret;
+
+ ret = ast_detect_chip(pdev, regs, ioregs, &chip, &config_mode);
+ if (ret)
+ return ret;
+
+ drm = ast_device_create(pdev, &ast_driver, chip, config_mode, regs, ioregs, need_post);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+ pci_set_drvdata(pdev, drm);
- ret = drm_dev_register(dev, ent->driver_data);
+ ret = drm_dev_register(drm, ent->driver_data);
if (ret)
return ret;
- drm_fbdev_generic_setup(dev, 32);
+ drm_fbdev_generic_setup(drm, 32);
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 772f3b049c16..3be5ccf1f5f4 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -98,6 +98,12 @@ enum ast_tx_chip {
#define AST_TX_DP501_BIT BIT(AST_TX_DP501)
#define AST_TX_ASTDP_BIT BIT(AST_TX_ASTDP)
+enum ast_config_mode {
+ ast_use_p2a,
+ ast_use_dt,
+ ast_use_defaults
+};
+
#define AST_DRAM_512Mx16 0
#define AST_DRAM_1Gx16 1
#define AST_DRAM_512Mx32 2
@@ -192,12 +198,13 @@ to_ast_bmc_connector(struct drm_connector *connector)
struct ast_device {
struct drm_device base;
- struct mutex ioregs_lock; /* Protects access to I/O registers in ioregs */
void __iomem *regs;
void __iomem *ioregs;
void __iomem *dp501_fw_buf;
+ enum ast_config_mode config_mode;
enum ast_chip chip;
+
uint32_t dram_bus_width;
uint32_t dram_type;
uint32_t mclk;
@@ -207,6 +214,8 @@ struct ast_device {
unsigned long vram_size;
unsigned long vram_fb_available;
+ struct mutex modeset_lock; /* Protects access to modeset I/O registers in ioregs */
+
struct ast_plane primary_plane;
struct ast_plane cursor_plane;
struct drm_crtc crtc;
@@ -234,11 +243,6 @@ struct ast_device {
} output;
bool support_wide_screen;
- enum {
- ast_use_p2a,
- ast_use_dt,
- ast_use_defaults
- } config_mode;
unsigned long tx_chip_types; /* bitfield of enum ast_chip_type */
u8 *dp501_fw_addr;
@@ -250,9 +254,13 @@ static inline struct ast_device *to_ast_device(struct drm_device *dev)
return container_of(dev, struct ast_device, base);
}
-struct ast_device *ast_device_create(const struct drm_driver *drv,
- struct pci_dev *pdev,
- unsigned long flags);
+struct drm_device *ast_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post);
static inline unsigned long __ast_gen(struct ast_device *ast)
{
@@ -272,55 +280,94 @@ static inline bool __ast_gen_is_eq(struct ast_device *ast, unsigned long gen)
#define IS_AST_GEN6(__ast) __ast_gen_is_eq(__ast, 6)
#define IS_AST_GEN7(__ast) __ast_gen_is_eq(__ast, 7)
+static inline u8 __ast_read8(const void __iomem *addr, u32 reg)
+{
+ return ioread8(addr + reg);
+}
+
+static inline u32 __ast_read32(const void __iomem *addr, u32 reg)
+{
+ return ioread32(addr + reg);
+}
+
+static inline void __ast_write8(void __iomem *addr, u32 reg, u8 val)
+{
+ iowrite8(val, addr + reg);
+}
+
+static inline void __ast_write32(void __iomem *addr, u32 reg, u32 val)
+{
+ iowrite32(val, addr + reg);
+}
+
+static inline u8 __ast_read8_i(void __iomem *addr, u32 reg, u8 index)
+{
+ __ast_write8(addr, reg, index);
+ return __ast_read8(addr, reg + 1);
+}
+
+static inline u8 __ast_read8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask)
+{
+ u8 val = __ast_read8_i(addr, reg, index);
+
+ return val & read_mask;
+}
+
+static inline void __ast_write8_i(void __iomem *addr, u32 reg, u8 index, u8 val)
+{
+ __ast_write8(addr, reg, index);
+ __ast_write8(addr, reg + 1, val);
+}
+
+static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask,
+ u8 val)
+{
+ u8 tmp = __ast_read8_i_masked(addr, reg, index, read_mask);
+
+ tmp |= val;
+ __ast_write8_i(addr, reg, index, tmp);
+}
+
static inline u32 ast_read32(struct ast_device *ast, u32 reg)
{
- return ioread32(ast->regs + reg);
+ return __ast_read32(ast->regs, reg);
}
static inline void ast_write32(struct ast_device *ast, u32 reg, u32 val)
{
- iowrite32(val, ast->regs + reg);
+ __ast_write32(ast->regs, reg, val);
}
static inline u8 ast_io_read8(struct ast_device *ast, u32 reg)
{
- return ioread8(ast->ioregs + reg);
+ return __ast_read8(ast->ioregs, reg);
}
static inline void ast_io_write8(struct ast_device *ast, u32 reg, u8 val)
{
- iowrite8(val, ast->ioregs + reg);
+ __ast_write8(ast->ioregs, reg, val);
}
static inline u8 ast_get_index_reg(struct ast_device *ast, u32 base, u8 index)
{
- ast_io_write8(ast, base, index);
- ++base;
- return ast_io_read8(ast, base);
+ return __ast_read8_i(ast->ioregs, base, index);
}
static inline u8 ast_get_index_reg_mask(struct ast_device *ast, u32 base, u8 index,
u8 preserve_mask)
{
- u8 val = ast_get_index_reg(ast, base, index);
-
- return val & preserve_mask;
+ return __ast_read8_i_masked(ast->ioregs, base, index, preserve_mask);
}
static inline void ast_set_index_reg(struct ast_device *ast, u32 base, u8 index, u8 val)
{
- ast_io_write8(ast, base, index);
- ++base;
- ast_io_write8(ast, base, val);
+ __ast_write8_i(ast->ioregs, base, index, val);
}
static inline void ast_set_index_reg_mask(struct ast_device *ast, u32 base, u8 index,
u8 preserve_mask, u8 val)
{
- u8 tmp = ast_get_index_reg_mask(ast, base, index, preserve_mask);
-
- tmp |= val;
- ast_set_index_reg(ast, base, index, tmp);
+ __ast_write8_i_masked(ast->ioregs, base, index, preserve_mask, val);
}
#define AST_VIDMEM_SIZE_8M 0x00800000
@@ -442,7 +489,7 @@ int ast_mm_init(struct ast_device *ast);
void ast_post_gpu(struct drm_device *dev);
u32 ast_mindwm(struct ast_device *ast, u32 r);
void ast_moutdwm(struct ast_device *ast, u32 r, u32 v);
-void ast_patch_ahb_2500(struct ast_device *ast);
+void ast_patch_ahb_2500(void __iomem *regs);
/* ast dp501 */
void ast_set_dp501_video_output(struct drm_device *dev, u8 mode);
bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
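
The __ast_*_i() helpers encode the classic VGA indexed-register protocol: write the index byte to the base port, then read or write the data byte at base + 1; the ast_*_index_reg* wrappers are now thin forwarders over them. A short usage sketch against a mapped ioregs, using registers defined in ast_reg.h:

    /* Read VGA CRTC register 0xD0 via the index/data pair at
     * AST_IO_VGACRI and AST_IO_VGACRI + 1. */
    u8 vgacrd0 = __ast_read8_i(ioregs, AST_IO_VGACRI, 0xd0);

    /* Preserve all current bits (mask 0xff) and OR in the MMIO-enable
     * flag of VGA CRTC register 0xA1. */
    __ast_write8_i_masked(ioregs, AST_IO_VGACRI, 0xa1, 0xff,
                          AST_IO_VGACRA1_MMIO_ENABLED);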
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f4ab40e22cea..2f3ad5f949fc 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -35,180 +35,6 @@
#include "ast_drv.h"
-static bool ast_is_vga_enabled(struct drm_device *dev)
-{
- struct ast_device *ast = to_ast_device(dev);
- u8 ch;
-
- ch = ast_io_read8(ast, AST_IO_VGAER);
-
- return !!(ch & 0x01);
-}
-
-static void ast_enable_vga(struct drm_device *dev)
-{
- struct ast_device *ast = to_ast_device(dev);
-
- ast_io_write8(ast, AST_IO_VGAER, 0x01);
- ast_io_write8(ast, AST_IO_VGAMR_W, 0x01);
-}
-
-/*
- * Run this function as part of the HW device cleanup; not
- * when the DRM device gets released.
- */
-static void ast_enable_mmio_release(void *data)
-{
- struct ast_device *ast = data;
-
- /* enable standard VGA decode */
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa1, 0x04);
-}
-
-static int ast_enable_mmio(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
-
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa1, 0x06);
-
- return devm_add_action_or_reset(dev->dev, ast_enable_mmio_release, ast);
-}
-
-static void ast_open_key(struct ast_device *ast)
-{
- ast_set_index_reg(ast, AST_IO_VGACRI, 0x80, 0xA8);
-}
-
-static int ast_device_config_init(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- struct device_node *np = dev->dev->of_node;
- uint32_t scu_rev = 0xffffffff;
- u32 data;
- u8 jregd0, jregd1;
-
- /*
- * Find configuration mode and read SCU revision
- */
-
- ast->config_mode = ast_use_defaults;
-
- /* Check if we have device-tree properties */
- if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", &data)) {
- /* We do, disable P2A access */
- ast->config_mode = ast_use_dt;
- scu_rev = data;
- } else if (pdev->device == PCI_CHIP_AST2000) { // Not all families have a P2A bridge
- /*
- * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge
- * is disabled. We force using P2A if VGA only mode bit
- * is set D[7]
- */
- jregd0 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- jregd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, 0xff);
- if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
-
- /*
- * We have a P2A bridge and it is enabled.
- */
-
- /* Patch AST2500/AST2510 */
- if ((pdev->revision & 0xf0) == 0x40) {
- if (!(jregd0 & AST_VRAM_INIT_STATUS_MASK))
- ast_patch_ahb_2500(ast);
- }
-
- /* Double check that it's actually working */
- data = ast_read32(ast, 0xf004);
- if ((data != 0xffffffff) && (data != 0x00)) {
- ast->config_mode = ast_use_p2a;
-
- /* Read SCU7c (silicon revision register) */
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- scu_rev = ast_read32(ast, 0x1207c);
- }
- }
- }
-
- switch (ast->config_mode) {
- case ast_use_defaults:
- drm_info(dev, "Using default configuration\n");
- break;
- case ast_use_dt:
- drm_info(dev, "Using device-tree for configuration\n");
- break;
- case ast_use_p2a:
- drm_info(dev, "Using P2A bridge for configuration\n");
- break;
- }
-
- /*
- * Identify chipset
- */
-
- if (pdev->revision >= 0x50) {
- ast->chip = AST2600;
- drm_info(dev, "AST 2600 detected\n");
- } else if (pdev->revision >= 0x40) {
- switch (scu_rev & 0x300) {
- case 0x0100:
- ast->chip = AST2510;
- drm_info(dev, "AST 2510 detected\n");
- break;
- default:
- ast->chip = AST2500;
- drm_info(dev, "AST 2500 detected\n");
- }
- } else if (pdev->revision >= 0x30) {
- switch (scu_rev & 0x300) {
- case 0x0100:
- ast->chip = AST1400;
- drm_info(dev, "AST 1400 detected\n");
- break;
- default:
- ast->chip = AST2400;
- drm_info(dev, "AST 2400 detected\n");
- }
- } else if (pdev->revision >= 0x20) {
- switch (scu_rev & 0x300) {
- case 0x0000:
- ast->chip = AST1300;
- drm_info(dev, "AST 1300 detected\n");
- break;
- default:
- ast->chip = AST2300;
- drm_info(dev, "AST 2300 detected\n");
- break;
- }
- } else if (pdev->revision >= 0x10) {
- switch (scu_rev & 0x0300) {
- case 0x0200:
- ast->chip = AST1100;
- drm_info(dev, "AST 1100 detected\n");
- break;
- case 0x0100:
- ast->chip = AST2200;
- drm_info(dev, "AST 2200 detected\n");
- break;
- case 0x0000:
- ast->chip = AST2150;
- drm_info(dev, "AST 2150 detected\n");
- break;
- default:
- ast->chip = AST2100;
- drm_info(dev, "AST 2100 detected\n");
- break;
- }
- } else {
- ast->chip = AST2000;
- drm_info(dev, "AST 2000 detected\n");
- }
-
- return 0;
-}
-
static void ast_detect_widescreen(struct ast_device *ast)
{
u8 jreg;
@@ -424,69 +250,27 @@ static int ast_get_dram_info(struct drm_device *dev)
return 0;
}
-struct ast_device *ast_device_create(const struct drm_driver *drv,
- struct pci_dev *pdev,
- unsigned long flags)
+struct drm_device *ast_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post)
{
struct drm_device *dev;
struct ast_device *ast;
- bool need_post = false;
- int ret = 0;
+ int ret;
ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
if (IS_ERR(ast))
- return ast;
+ return ERR_CAST(ast);
dev = &ast->base;
- pci_set_drvdata(pdev, dev);
-
- ret = drmm_mutex_init(dev, &ast->ioregs_lock);
- if (ret)
- return ERR_PTR(ret);
-
- ast->regs = pcim_iomap(pdev, 1, 0);
- if (!ast->regs)
- return ERR_PTR(-EIO);
-
- /*
- * After AST2500, MMIO is enabled by default, and it should be adopted
- * to be compatible with Arm.
- */
- if (pdev->revision >= 0x40) {
- ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
- } else if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) {
- drm_info(dev, "platform has no IO space, trying MMIO\n");
- ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
- }
-
- /* "map" IO regs if the above hasn't done so already */
- if (!ast->ioregs) {
- ast->ioregs = pcim_iomap(pdev, 2, 0);
- if (!ast->ioregs)
- return ERR_PTR(-EIO);
- }
-
- if (!ast_is_vga_enabled(dev)) {
- drm_info(dev, "VGA not enabled on entry, requesting chip POST\n");
- need_post = true;
- }
-
- /*
- * If VGA isn't enabled, we need to enable now or subsequent
- * access to the scratch registers will fail.
- */
- if (need_post)
- ast_enable_vga(dev);
-
- /* Enable extended register access */
- ast_open_key(ast);
- ret = ast_enable_mmio(ast);
- if (ret)
- return ERR_PTR(ret);
-
- ret = ast_device_config_init(ast);
- if (ret)
- return ERR_PTR(ret);
+ ast->chip = chip;
+ ast->config_mode = config_mode;
+ ast->regs = regs;
+ ast->ioregs = ioregs;
ast_detect_widescreen(ast);
ast_detect_tx_chip(ast, need_post);
@@ -517,5 +301,5 @@ struct ast_device *ast_device_create(const struct drm_driver *drv,
if (ret)
return ERR_PTR(ret);
- return ast;
+ return dev;
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index c20534d0ef7c..a718646a66b8 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -1358,13 +1358,13 @@ static int ast_vga_connector_helper_get_modes(struct drm_connector *connector)
* Protect access to I/O registers from concurrent modesetting
* by acquiring the I/O-register lock.
*/
- mutex_lock(&ast->ioregs_lock);
+ mutex_lock(&ast->modeset_lock);
edid = drm_get_edid(connector, &ast_vga_connector->i2c->adapter);
if (!edid)
goto err_mutex_unlock;
- mutex_unlock(&ast->ioregs_lock);
+ mutex_unlock(&ast->modeset_lock);
count = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -1372,7 +1372,7 @@ static int ast_vga_connector_helper_get_modes(struct drm_connector *connector)
return count;
err_mutex_unlock:
- mutex_unlock(&ast->ioregs_lock);
+ mutex_unlock(&ast->modeset_lock);
err_drm_connector_update_edid_property:
drm_connector_update_edid_property(connector, NULL);
return 0;
@@ -1464,13 +1464,13 @@ static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector
* Protect access to I/O registers from concurrent modesetting
* by acquiring the I/O-register lock.
*/
- mutex_lock(&ast->ioregs_lock);
+ mutex_lock(&ast->modeset_lock);
edid = drm_get_edid(connector, &ast_sil164_connector->i2c->adapter);
if (!edid)
goto err_mutex_unlock;
- mutex_unlock(&ast->ioregs_lock);
+ mutex_unlock(&ast->modeset_lock);
count = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -1478,7 +1478,7 @@ static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector
return count;
err_mutex_unlock:
- mutex_unlock(&ast->ioregs_lock);
+ mutex_unlock(&ast->modeset_lock);
err_drm_connector_update_edid_property:
drm_connector_update_edid_property(connector, NULL);
return 0;
@@ -1670,13 +1670,13 @@ static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector)
* Protect access to I/O registers from concurrent modesetting
* by acquiring the I/O-register lock.
*/
- mutex_lock(&ast->ioregs_lock);
+ mutex_lock(&ast->modeset_lock);
succ = ast_astdp_read_edid(connector->dev, edid);
if (succ < 0)
goto err_mutex_unlock;
- mutex_unlock(&ast->ioregs_lock);
+ mutex_unlock(&ast->modeset_lock);
drm_connector_update_edid_property(connector, edid);
count = drm_add_edid_modes(connector, edid);
@@ -1685,7 +1685,7 @@ static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector)
return count;
err_mutex_unlock:
- mutex_unlock(&ast->ioregs_lock);
+ mutex_unlock(&ast->modeset_lock);
kfree(edid);
err_drm_connector_update_edid_property:
drm_connector_update_edid_property(connector, NULL);
@@ -1870,9 +1870,9 @@ static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *s
* display modes. Protect access to I/O registers by acquiring
* the I/O-register lock. Released in atomic_flush().
*/
- mutex_lock(&ast->ioregs_lock);
+ mutex_lock(&ast->modeset_lock);
drm_atomic_helper_commit_tail_rpm(state);
- mutex_unlock(&ast->ioregs_lock);
+ mutex_unlock(&ast->modeset_lock);
}
static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs = {
@@ -1910,6 +1910,10 @@ int ast_mode_config_init(struct ast_device *ast)
struct drm_connector *physical_connector = NULL;
int ret;
+ ret = drmm_mutex_init(dev, &ast->modeset_lock);
+ if (ret)
+ return ret;
+
ret = drmm_mode_config_init(dev);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 7a993a384314..22f548805dfb 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -77,28 +77,42 @@ ast_set_def_ext_reg(struct drm_device *dev)
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg);
}
-u32 ast_mindwm(struct ast_device *ast, u32 r)
+static u32 __ast_mindwm(void __iomem *regs, u32 r)
{
- uint32_t data;
+ u32 data;
- ast_write32(ast, 0xf004, r & 0xffff0000);
- ast_write32(ast, 0xf000, 0x1);
+ __ast_write32(regs, 0xf004, r & 0xffff0000);
+ __ast_write32(regs, 0xf000, 0x1);
do {
- data = ast_read32(ast, 0xf004) & 0xffff0000;
+ data = __ast_read32(regs, 0xf004) & 0xffff0000;
} while (data != (r & 0xffff0000));
- return ast_read32(ast, 0x10000 + (r & 0x0000ffff));
+
+ return __ast_read32(regs, 0x10000 + (r & 0x0000ffff));
}
-void ast_moutdwm(struct ast_device *ast, u32 r, u32 v)
+static void __ast_moutdwm(void __iomem *regs, u32 r, u32 v)
{
- uint32_t data;
- ast_write32(ast, 0xf004, r & 0xffff0000);
- ast_write32(ast, 0xf000, 0x1);
+ u32 data;
+
+ __ast_write32(regs, 0xf004, r & 0xffff0000);
+ __ast_write32(regs, 0xf000, 0x1);
+
do {
- data = ast_read32(ast, 0xf004) & 0xffff0000;
+ data = __ast_read32(regs, 0xf004) & 0xffff0000;
} while (data != (r & 0xffff0000));
- ast_write32(ast, 0x10000 + (r & 0x0000ffff), v);
+
+ __ast_write32(regs, 0x10000 + (r & 0x0000ffff), v);
+}
+
+u32 ast_mindwm(struct ast_device *ast, u32 r)
+{
+ return __ast_mindwm(ast->regs, r);
+}
+
+void ast_moutdwm(struct ast_device *ast, u32 r, u32 v)
+{
+ __ast_moutdwm(ast->regs, r, v);
}
/*
@@ -1987,17 +2001,18 @@ static bool ast_dram_init_2500(struct ast_device *ast)
return true;
}
-void ast_patch_ahb_2500(struct ast_device *ast)
+void ast_patch_ahb_2500(void __iomem *regs)
{
- u32 data;
+ u32 data;
/* Clear bus lock condition */
- ast_moutdwm(ast, 0x1e600000, 0xAEED1A03);
- ast_moutdwm(ast, 0x1e600084, 0x00010000);
- ast_moutdwm(ast, 0x1e600088, 0x00000000);
- ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8);
- data = ast_mindwm(ast, 0x1e6e2070);
- if (data & 0x08000000) { /* check fast reset */
+ __ast_moutdwm(regs, 0x1e600000, 0xAEED1A03);
+ __ast_moutdwm(regs, 0x1e600084, 0x00010000);
+ __ast_moutdwm(regs, 0x1e600088, 0x00000000);
+ __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);
+
+ data = __ast_mindwm(regs, 0x1e6e2070);
+ if (data & 0x08000000) { /* check fast reset */
/*
* If "Fast restet" is enabled for ARM-ICE debugger,
* then WDT needs to enable, that
@@ -2009,16 +2024,18 @@ void ast_patch_ahb_2500(struct ast_device *ast)
* [1]:= 1:WDT will be cleared and disabled after timeout occurs
* [0]:= 1:WDT enable
*/
- ast_moutdwm(ast, 0x1E785004, 0x00000010);
- ast_moutdwm(ast, 0x1E785008, 0x00004755);
- ast_moutdwm(ast, 0x1E78500c, 0x00000033);
+ __ast_moutdwm(regs, 0x1E785004, 0x00000010);
+ __ast_moutdwm(regs, 0x1E785008, 0x00004755);
+ __ast_moutdwm(regs, 0x1E78500c, 0x00000033);
udelay(1000);
}
+
do {
- ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8);
- data = ast_mindwm(ast, 0x1e6e2000);
- } while (data != 1);
- ast_moutdwm(ast, 0x1e6e207c, 0x08000000); /* clear fast reset */
+ __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);
+ data = __ast_mindwm(regs, 0x1e6e2000);
+ } while (data != 1);
+
+ __ast_moutdwm(regs, 0x1e6e207c, 0x08000000); /* clear fast reset */
}
void ast_post_chip_2500(struct drm_device *dev)
@@ -2030,7 +2047,7 @@ void ast_post_chip_2500(struct drm_device *dev)
reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
if ((reg & AST_VRAM_INIT_STATUS_MASK) == 0) {/* vga only */
/* Clear bus lock condition */
- ast_patch_ahb_2500(ast);
+ ast_patch_ahb_2500(ast->regs);
/* Disable watchdog */
ast_moutdwm(ast, 0x1E78502C, 0x00000000);
diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h
index 555286ecf520..62dddbf3fe56 100644
--- a/drivers/gpu/drm/ast/ast_reg.h
+++ b/drivers/gpu/drm/ast/ast_reg.h
@@ -10,10 +10,17 @@
*/
#define AST_IO_MM_OFFSET (0x380)
+#define AST_IO_MM_LENGTH (128)
#define AST_IO_VGAARI_W (0x40)
+
#define AST_IO_VGAMR_W (0x42)
+#define AST_IO_VGAMR_R (0x4c)
+#define AST_IO_VGAMR_IOSEL BIT(0)
+
#define AST_IO_VGAER (0x43)
+#define AST_IO_VGAER_VGA_ENABLE BIT(0)
+
#define AST_IO_VGASRI (0x44)
#define AST_IO_VGADRR (0x47)
#define AST_IO_VGADWR (0x48)
@@ -21,14 +28,15 @@
#define AST_IO_VGAGRI (0x4E)
#define AST_IO_VGACRI (0x54)
+#define AST_IO_VGACR80_PASSWORD (0xa8)
+#define AST_IO_VGACRA1_VGAIO_DISABLED BIT(1)
+#define AST_IO_VGACRA1_MMIO_ENABLED BIT(2)
#define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
#define AST_IO_VGACRCB_HWC_ENABLED BIT(1)
#define AST_IO_VGAIR1_R (0x5A)
#define AST_IO_VGAIR1_VREFRESH BIT(3)
-#define AST_IO_VGAMR_R (0x4C)
-
/*
* Display Transmitter Type
*/
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index ba82a1142adf..efd996f6c138 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -12,6 +12,23 @@ config DRM_PANEL_BRIDGE
help
DRM bridge wrapper of DRM panels
+config DRM_AUX_BRIDGE
+ tristate
+ depends on DRM_BRIDGE && OF
+ select AUXILIARY_BUS
+ select DRM_PANEL_BRIDGE
+ help
+ Simple transparent bridge that is used by several non-DRM drivers to
+ build bridge chains.
+
+config DRM_AUX_HPD_BRIDGE
+ tristate
+ depends on DRM_BRIDGE && OF
+ select AUXILIARY_BUS
+ help
+ Simple bridge that terminates the bridge chain and provides HPD
+ support.
+
menu "Display Interface Bridges"
depends on DRM && DRM_BRIDGE
@@ -313,6 +330,7 @@ config DRM_TOSHIBA_TC358768
select REGMAP_I2C
select DRM_PANEL
select DRM_MIPI_DSI
+ select VIDEOMODE_HELPERS
help
Toshiba TC358768AXBG/TC358778XBG DSI bridge chip driver.
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 2b892b7ed59e..017b5832733b 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_DRM_AUX_BRIDGE) += aux-bridge.o
+obj-$(CONFIG_DRM_AUX_HPD_BRIDGE) += aux-hpd-bridge.o
obj-$(CONFIG_DRM_CHIPONE_ICN6211) += chipone-icn6211.o
obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o
obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 8f740154707d..ef31033439bc 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1298,10 +1298,32 @@ static void anx7625_config(struct anx7625_data *ctx)
XTAL_FRQ_SEL, XTAL_FRQ_27M);
}
+static int anx7625_hpd_timer_config(struct anx7625_data *ctx)
+{
+ int ret;
+
+ /* Set irq detect window to 2ms */
+ ret = anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+ HPD_DET_TIMER_BIT0_7, HPD_TIME & 0xFF);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+ HPD_DET_TIMER_BIT8_15,
+ (HPD_TIME >> 8) & 0xFF);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+ HPD_DET_TIMER_BIT16_23,
+ (HPD_TIME >> 16) & 0xFF);
+
+ return ret;
+}
+
+static int anx7625_read_hpd_gpio_config_status(struct anx7625_data *ctx)
+{
+ return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, GPIO_CTRL_2);
+}
+
static void anx7625_disable_pd_protocol(struct anx7625_data *ctx)
{
struct device *dev = ctx->dev;
- int ret;
+ int ret, val;
/* Reset main ocm */
ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, 0x88, 0x40);
@@ -1315,6 +1337,19 @@ static void anx7625_disable_pd_protocol(struct anx7625_data *ctx)
DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature fail.\n");
else
DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature succeeded.\n");
+
+ /*
+ * Make sure the HPD GPIO already be configured after OCM release before
+ * setting HPD detect window register. Here we poll the status register
+ * at maximum 40ms, then config HPD irq detect window register
+ */
+ readx_poll_timeout(anx7625_read_hpd_gpio_config_status,
+ ctx, val,
+ ((val & HPD_SOURCE) || (val < 0)),
+ 2000, 2000 * 20);
+
+ /* Set HPD irq detect window to 2ms */
+ anx7625_hpd_timer_config(ctx);
}
static int anx7625_ocm_loading_check(struct anx7625_data *ctx)
@@ -1437,20 +1472,6 @@ static void anx7625_start_dp_work(struct anx7625_data *ctx)
static int anx7625_read_hpd_status_p0(struct anx7625_data *ctx)
{
- int ret;
-
- /* Set irq detect window to 2ms */
- ret = anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
- HPD_DET_TIMER_BIT0_7, HPD_TIME & 0xFF);
- ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
- HPD_DET_TIMER_BIT8_15,
- (HPD_TIME >> 8) & 0xFF);
- ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
- HPD_DET_TIMER_BIT16_23,
- (HPD_TIME >> 16) & 0xFF);
- if (ret < 0)
- return ret;
-
return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, SYSTEM_STSTUS);
}
@@ -1464,9 +1485,6 @@ static int _anx7625_hpd_polling(struct anx7625_data *ctx,
if (ctx->pdata.intp_irq)
return 0;
- /* Delay 200ms for FW HPD de-bounce */
- msleep(200);
-
ret = readx_poll_timeout(anx7625_read_hpd_status_p0,
ctx, val,
((val & HPD_STATUS) || (val < 0)),
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h
index 5af819611ebc..66ebee7f3d83 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.h
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.h
@@ -259,6 +259,10 @@
#define AP_MIPI_RX_EN BIT(5) /* 1: MIPI RX input in 0: no RX in */
#define AP_DISABLE_PD BIT(6)
#define AP_DISABLE_DISPLAY BIT(7)
+
+#define GPIO_CTRL_2 0x49
+#define HPD_SOURCE BIT(6)
+
/***************************************************************/
/* Register definition of device address 0x84 */
#define MIPI_PHY_CONTROL_3 0x03
diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c
new file mode 100644
index 000000000000..49d7c2ab1ecc
--- /dev/null
+++ b/drivers/gpu/drm/bridge/aux-bridge.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Linaro Ltd.
+ *
+ * Author: Dmitry Baryshkov <[email protected]>
+ */
+#include <linux/auxiliary_bus.h>
+#include <linux/module.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/bridge/aux-bridge.h>
+
+static DEFINE_IDA(drm_aux_bridge_ida);
+
+static void drm_aux_bridge_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+
+ ida_free(&drm_aux_bridge_ida, adev->id);
+
+ kfree(adev);
+}
+
+static void drm_aux_bridge_unregister_adev(void *_adev)
+{
+ struct auxiliary_device *adev = _adev;
+
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
+
+/**
+ * drm_aux_bridge_register - Create a simple bridge device to link the chain
+ * @parent: device instance providing this bridge
+ *
+ * Creates a simple DRM bridge that doesn't implement any drm_bridge
+ * operations. Such bridges merely fill a place in the bridge chain linking
+ * surrounding DRM bridges.
+ *
+ * Return: zero on success, negative error code on failure
+ */
+int drm_aux_bridge_register(struct device *parent)
+{
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ ret = ida_alloc(&drm_aux_bridge_ida, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(adev);
+ return ret;
+ }
+
+ adev->id = ret;
+ adev->name = "aux_bridge";
+ adev->dev.parent = parent;
+ adev->dev.of_node = parent->of_node;
+ adev->dev.release = drm_aux_bridge_release;
+
+ ret = auxiliary_device_init(adev);
+ if (ret) {
+ ida_free(&drm_aux_bridge_ida, adev->id);
+ kfree(adev);
+ return ret;
+ }
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ret;
+ }
+
+ return devm_add_action_or_reset(parent, drm_aux_bridge_unregister_adev, adev);
+}
+EXPORT_SYMBOL_GPL(drm_aux_bridge_register);
+
+struct drm_aux_bridge_data {
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ struct device *dev;
+};
+
+static int drm_aux_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct drm_aux_bridge_data *data;
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ data = container_of(bridge, struct drm_aux_bridge_data, bridge);
+
+ return drm_bridge_attach(bridge->encoder, data->next_bridge, bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+}
+
+static const struct drm_bridge_funcs drm_aux_bridge_funcs = {
+ .attach = drm_aux_bridge_attach,
+};
+
+static int drm_aux_bridge_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct drm_aux_bridge_data *data;
+
+ data = devm_kzalloc(&auxdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = &auxdev->dev;
+ data->next_bridge = devm_drm_of_get_bridge(&auxdev->dev, auxdev->dev.of_node, 0, 0);
+ if (IS_ERR(data->next_bridge))
+ return dev_err_probe(&auxdev->dev, PTR_ERR(data->next_bridge),
+ "failed to acquire drm_bridge\n");
+
+ data->bridge.funcs = &drm_aux_bridge_funcs;
+ data->bridge.of_node = data->dev->of_node;
+
+ return devm_drm_bridge_add(data->dev, &data->bridge);
+}
+
+static const struct auxiliary_device_id drm_aux_bridge_table[] = {
+ { .name = KBUILD_MODNAME ".aux_bridge" },
+ {},
+};
+MODULE_DEVICE_TABLE(auxiliary, drm_aux_bridge_table);
+
+static struct auxiliary_driver drm_aux_bridge_drv = {
+ .name = "aux_bridge",
+ .id_table = drm_aux_bridge_table,
+ .probe = drm_aux_bridge_probe,
+};
+module_auxiliary_driver(drm_aux_bridge_drv);
+
+MODULE_AUTHOR("Dmitry Baryshkov <[email protected]>");
+MODULE_DESCRIPTION("DRM transparent bridge");
+MODULE_LICENSE("GPL");
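
A consumer simply calls drm_aux_bridge_register() from probe; teardown is bound to the parent via devm_add_action_or_reset(), so no explicit unregister exists. A hedged usage sketch in a hypothetical platform driver:

    static int my_mux_probe(struct platform_device *pdev)
    {
            int ret;

            /* Publish a transparent bridge for this OF node so the
             * surrounding DRM bridges can link through it. */
            ret = drm_aux_bridge_register(&pdev->dev);
            if (ret)
                    return dev_err_probe(&pdev->dev, ret,
                                         "failed to register aux bridge\n");

            return 0;
    }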
diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
new file mode 100644
index 000000000000..5d2ab3a715f9
--- /dev/null
+++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Linaro Ltd.
+ *
+ * Author: Dmitry Baryshkov <[email protected]>
+ */
+#include <linux/auxiliary_bus.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/bridge/aux-bridge.h>
+
+static DEFINE_IDA(drm_aux_hpd_bridge_ida);
+
+struct drm_aux_hpd_bridge_data {
+ struct drm_bridge bridge;
+ struct device *dev;
+};
+
+static void drm_aux_hpd_bridge_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+
+ ida_free(&drm_aux_hpd_bridge_ida, adev->id);
+
+ of_node_put(adev->dev.platform_data);
+
+ kfree(adev);
+}
+
+static void drm_aux_hpd_bridge_unregister_adev(void *_adev)
+{
+ struct auxiliary_device *adev = _adev;
+
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
+
+/**
+ * drm_dp_hpd_bridge_register - Create a simple HPD DisplayPort bridge
+ * @parent: device instance providing this bridge
+ * @np: device node pointer corresponding to this bridge instance
+ *
+ * Creates a simple DRM bridge with the type set to
+ * DRM_MODE_CONNECTOR_DisplayPort, which terminates the bridge chain and is
+ * able to send HPD events.
+ *
+ * Return: device instance that will handle created bridge or an error code
+ * encoded into the pointer.
+ */
+struct device *drm_dp_hpd_bridge_register(struct device *parent,
+ struct device_node *np)
+{
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return ERR_PTR(-ENOMEM);
+
+ ret = ida_alloc(&drm_aux_hpd_bridge_ida, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(adev);
+ return ERR_PTR(ret);
+ }
+
+ adev->id = ret;
+ adev->name = "dp_hpd_bridge";
+ adev->dev.parent = parent;
+ adev->dev.of_node = parent->of_node;
+ adev->dev.release = drm_aux_hpd_bridge_release;
+ adev->dev.platform_data = np;
+
+ ret = auxiliary_device_init(adev);
+ if (ret) {
+ ida_free(&drm_aux_hpd_bridge_ida, adev->id);
+ kfree(adev);
+ return ERR_PTR(ret);
+ }
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ERR_PTR(ret);
+ }
+
+ ret = devm_add_action_or_reset(parent, drm_aux_hpd_bridge_unregister_adev, adev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &adev->dev;
+}
+EXPORT_SYMBOL_GPL(drm_dp_hpd_bridge_register);
+
+/**
+ * drm_aux_hpd_bridge_notify - notify hot plug detection events
+ * @dev: device created for the HPD bridge
+ * @status: output connection status
+ *
+ * A wrapper around drm_bridge_hpd_notify() that is used to report hot plug
+ * detection events for bridges created via drm_dp_hpd_bridge_register().
+ *
+ * This function shall be called in a context that can sleep.
+ */
+void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+ struct drm_aux_hpd_bridge_data *data = auxiliary_get_drvdata(adev);
+
+ if (!data)
+ return;
+
+ drm_bridge_hpd_notify(&data->bridge, status);
+}
+EXPORT_SYMBOL_GPL(drm_aux_hpd_bridge_notify);
+
+static int drm_aux_hpd_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
+}
+
+static const struct drm_bridge_funcs drm_aux_hpd_bridge_funcs = {
+ .attach = drm_aux_hpd_bridge_attach,
+};
+
+static int drm_aux_hpd_bridge_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct drm_aux_hpd_bridge_data *data;
+
+ data = devm_kzalloc(&auxdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = &auxdev->dev;
+ data->bridge.funcs = &drm_aux_hpd_bridge_funcs;
+ data->bridge.of_node = dev_get_platdata(data->dev);
+ data->bridge.ops = DRM_BRIDGE_OP_HPD;
+ data->bridge.type = id->driver_data;
+
+ auxiliary_set_drvdata(auxdev, data);
+
+ return devm_drm_bridge_add(data->dev, &data->bridge);
+}
+
+static const struct auxiliary_device_id drm_aux_hpd_bridge_table[] = {
+ { .name = KBUILD_MODNAME ".dp_hpd_bridge", .driver_data = DRM_MODE_CONNECTOR_DisplayPort, },
+ {},
+};
+MODULE_DEVICE_TABLE(auxiliary, drm_aux_hpd_bridge_table);
+
+static struct auxiliary_driver drm_aux_hpd_bridge_drv = {
+ .name = "aux_hpd_bridge",
+ .id_table = drm_aux_hpd_bridge_table,
+ .probe = drm_aux_hpd_bridge_probe,
+};
+module_auxiliary_driver(drm_aux_hpd_bridge_drv);
+
+MODULE_AUTHOR("Dmitry Baryshkov <[email protected]>");
+MODULE_DESCRIPTION("DRM HPD bridge");
+MODULE_LICENSE("GPL");
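
A producer (for instance a USB-C mux or PMIC driver) registers the bridge once and then forwards cable state from its event handler; per the kerneldoc above, drm_aux_hpd_bridge_notify() must be called from sleepable context. A hedged sketch with hypothetical driver state:

    /* In probe: create a DisplayPort HPD bridge for our OF graph node. */
    my->bridge_dev = drm_dp_hpd_bridge_register(dev, dev->of_node);
    if (IS_ERR(my->bridge_dev))
            return PTR_ERR(my->bridge_dev);

    /* Later, from the (sleepable) cable-event handler: */
    drm_aux_hpd_bridge_notify(my->bridge_dev,
                              attached ? connector_status_connected
                                       : connector_status_disconnected);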
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
index 946212a95598..5e3b8edcf794 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
@@ -403,7 +403,8 @@ static int _cdns_mhdp_hdcp_disable(struct cdns_mhdp_device *mhdp)
static int _cdns_mhdp_hdcp_enable(struct cdns_mhdp_device *mhdp, u8 content_type)
{
- int ret, tries = 3;
+ int ret = -EINVAL;
+ int tries = 3;
u32 i;
for (i = 0; i < tries; i++) {
diff --git a/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c b/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c
index 3ff30ce80c5b..2347f8dd632f 100644
--- a/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c
@@ -226,8 +226,8 @@ dphy_pll_get_configure_from_opts(struct imx93_dsi *dsi,
unsigned long fout;
unsigned long best_fout = 0;
unsigned int fvco_div;
- unsigned int min_n, max_n, n, best_n;
- unsigned long m, best_m;
+ unsigned int min_n, max_n, n, best_n = UINT_MAX;
+ unsigned long m, best_m = 0;
unsigned long min_delta = ULONG_MAX;
unsigned long delta;
u64 tmp;
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 03532efb893b..273157428c82 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -43,6 +43,8 @@ struct lt8912 {
struct videomode mode;
+ struct regulator_bulk_data supplies[7];
+
u8 data_lanes;
bool is_power_on;
};
@@ -257,6 +259,12 @@ static int lt8912_free_i2c(struct lt8912 *lt)
static int lt8912_hard_power_on(struct lt8912 *lt)
{
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(lt->supplies), lt->supplies);
+ if (ret)
+ return ret;
+
gpiod_set_value_cansleep(lt->gp_reset, 0);
msleep(20);
@@ -267,6 +275,9 @@ static void lt8912_hard_power_off(struct lt8912 *lt)
{
gpiod_set_value_cansleep(lt->gp_reset, 1);
msleep(20);
+
+ regulator_bulk_disable(ARRAY_SIZE(lt->supplies), lt->supplies);
+
lt->is_power_on = false;
}
@@ -634,6 +645,48 @@ static const struct drm_bridge_funcs lt8912_bridge_funcs = {
.get_edid = lt8912_bridge_get_edid,
};
+static int lt8912_bridge_resume(struct device *dev)
+{
+ struct lt8912 *lt = dev_get_drvdata(dev);
+ int ret;
+
+ ret = lt8912_hard_power_on(lt);
+ if (ret)
+ return ret;
+
+ ret = lt8912_soft_power_on(lt);
+ if (ret)
+ return ret;
+
+ return lt8912_video_on(lt);
+}
+
+static int lt8912_bridge_suspend(struct device *dev)
+{
+ struct lt8912 *lt = dev_get_drvdata(dev);
+
+ lt8912_hard_power_off(lt);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(lt8912_bridge_pm_ops, lt8912_bridge_suspend, lt8912_bridge_resume);
+
+static int lt8912_get_regulators(struct lt8912 *lt)
+{
+ unsigned int i;
+ const char * const supply_names[] = {
+ "vdd", "vccmipirx", "vccsysclk", "vcclvdstx",
+ "vcchdmitx", "vcclvdspll", "vcchdmipll"
+ };
+
+ for (i = 0; i < ARRAY_SIZE(lt->supplies); i++)
+ lt->supplies[i].supply = supply_names[i];
+
+ return devm_regulator_bulk_get(lt->dev, ARRAY_SIZE(lt->supplies),
+ lt->supplies);
+}
+
static int lt8912_parse_dt(struct lt8912 *lt)
{
struct gpio_desc *gp_reset;
@@ -685,6 +738,10 @@ static int lt8912_parse_dt(struct lt8912 *lt)
goto err_free_host_node;
}
+ ret = lt8912_get_regulators(lt);
+ if (ret)
+ goto err_free_host_node;
+
of_node_put(port_node);
return 0;
@@ -770,6 +827,7 @@ static struct i2c_driver lt8912_i2c_driver = {
.driver = {
.name = "lt8912",
.of_match_table = lt8912_dt_match,
+ .pm = pm_sleep_ptr(&lt8912_bridge_pm_ops),
},
.probe = lt8912_probe,
.remove = lt8912_remove,
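Taken together, the lt8912b changes follow the standard bulk-regulator lifecycle: devm_regulator_bulk_get() at parse time, regulator_bulk_enable() on power-up, regulator_bulk_disable() on power-down, with DEFINE_SIMPLE_DEV_PM_OPS() routing that power sequence through system suspend/resume. A minimal standalone sketch of the lifecycle, with hypothetical demo_* names:

	/* Minimal bulk-regulator lifecycle (sketch, hypothetical driver). */
	#include <linux/regulator/consumer.h>

	static const char * const demo_supply_names[] = { "vdd", "vccio" };

	struct demo {
		struct device *dev;
		struct regulator_bulk_data supplies[ARRAY_SIZE(demo_supply_names)];
	};

	static int demo_get_regulators(struct demo *d)
	{
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(d->supplies); i++)
			d->supplies[i].supply = demo_supply_names[i];

		/* devm-managed: regulators are put automatically on unbind */
		return devm_regulator_bulk_get(d->dev, ARRAY_SIZE(d->supplies),
					       d->supplies);
	}

	static int demo_power_on(struct demo *d)
	{
		/* enables all supplies, rolling back on partial failure */
		return regulator_bulk_enable(ARRAY_SIZE(d->supplies), d->supplies);
	}

	static void demo_power_off(struct demo *d)
	{
		regulator_bulk_disable(ARRAY_SIZE(d->supplies), d->supplies);
	}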
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index d81920227a8a..7c0076e49953 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -54,13 +54,13 @@ static int ptn3460_read_bytes(struct ptn3460_bridge *ptn_bridge, char addr,
int ret;
ret = i2c_master_send(ptn_bridge->client, &addr, 1);
- if (ret <= 0) {
+ if (ret < 0) {
DRM_ERROR("Failed to send i2c command, ret=%d\n", ret);
return ret;
}
ret = i2c_master_recv(ptn_bridge->client, buf, len);
- if (ret <= 0) {
+ if (ret < 0) {
DRM_ERROR("Failed to recv i2c data, ret=%d\n", ret);
return ret;
}
@@ -78,7 +78,7 @@ static int ptn3460_write_byte(struct ptn3460_bridge *ptn_bridge, char addr,
buf[1] = val;
ret = i2c_master_send(ptn_bridge->client, buf, ARRAY_SIZE(buf));
- if (ret <= 0) {
+ if (ret < 0) {
DRM_ERROR("Failed to send i2c command, ret=%d\n", ret);
return ret;
}
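The relaxed checks reflect the i2c API contract: i2c_master_send() and i2c_master_recv() return the number of bytes transferred on success or a negative errno, so only a negative return is a bus failure. A sketch of the convention; a caller that insists on a full transfer can still compare the byte count, as shown:

	/* Sketch: i2c return-value convention. */
	#include <linux/i2c.h>

	static int demo_i2c_write(struct i2c_client *client, const char *buf,
				  int len)
	{
		int ret = i2c_master_send(client, buf, len);

		if (ret < 0)
			return ret;		/* negative errno: bus failure */

		return ret == len ? 0 : -EIO;	/* optional: insist on a full write */
	}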
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index e48823a4f1ed..7f41525f7a6e 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -4,8 +4,6 @@
* Copyright (C) 2017 Broadcom
*/
-#include <linux/device.h>
-
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
@@ -21,7 +19,6 @@ struct panel_bridge {
struct drm_bridge bridge;
struct drm_connector connector;
struct drm_panel *panel;
- struct device_link *link;
u32 connector_type;
};
@@ -63,24 +60,13 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
struct drm_connector *connector = &panel_bridge->connector;
- struct drm_panel *panel = panel_bridge->panel;
- struct drm_device *drm_dev = bridge->dev;
int ret;
- panel_bridge->link = device_link_add(drm_dev->dev, panel->dev,
- DL_FLAG_STATELESS);
- if (!panel_bridge->link) {
- DRM_ERROR("Failed to add device link between %s and %s\n",
- dev_name(drm_dev->dev), dev_name(panel->dev));
- return -EINVAL;
- }
-
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
return 0;
if (!bridge->encoder) {
DRM_ERROR("Missing encoder\n");
- device_link_del(panel_bridge->link);
return -ENODEV;
}
@@ -92,7 +78,6 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
panel_bridge->connector_type);
if (ret) {
DRM_ERROR("Failed to initialize connector\n");
- device_link_del(panel_bridge->link);
return ret;
}
@@ -115,8 +100,6 @@ static void panel_bridge_detach(struct drm_bridge *bridge)
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
struct drm_connector *connector = &panel_bridge->connector;
- device_link_del(panel_bridge->link);
-
/*
* Cleanup the connector if we know it was initialized.
*
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index ef2e373606ba..615cc8f950d7 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -2273,7 +2273,7 @@ static int tc_probe(struct i2c_client *client)
} else {
if (tc->hpd_pin < 0 || tc->hpd_pin > 1) {
dev_err(dev, "failed to parse HPD number\n");
- return ret;
+ return -EINVAL;
}
}
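The tc358767 hunk fixes a stale-return bug: at this point ret still holds the value of an earlier successful call, so the invalid-HPD-pin path silently returned 0. A comment-framed sketch of the antipattern:

	/* The antipattern being fixed (sketch): */
	ret = earlier_call_that_succeeded();	/* leaves ret == 0 */
	if (hpd_pin < 0 || hpd_pin > 1) {
		dev_err(dev, "failed to parse HPD number\n");
		return ret;	/* BUG: reports success for an invalid pin */
	}
	/* The fix: return an explicit error code, here -EINVAL. */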
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index c45c07840f64..5b8e1dfc458d 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -1413,11 +1413,9 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
int ret;
if (!pdata->pwm_enabled) {
- ret = pm_runtime_get_sync(pdata->dev);
- if (ret < 0) {
- pm_runtime_put_sync(pdata->dev);
+ ret = pm_runtime_resume_and_get(pdata->dev);
+ if (ret < 0)
return ret;
- }
}
if (state->enabled) {
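pm_runtime_get_sync() increments the device usage counter even when the resume fails, so every error path had to drop the reference again; pm_runtime_resume_and_get() drops it internally on failure, collapsing the error handling to a single check. Before/after sketch:

	/* Before: caller must undo the usage-count bump on failure. */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_sync(dev);
		return ret;
	}

	/* After: the helper already dropped the reference on failure. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;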
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
deleted file mode 100644
index a4ad6fd13abc..000000000000
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ /dev/null
@@ -1,451 +0,0 @@
-/*
- * \file drm_agpsupport.c
- * DRM support for AGP/GART backend
- *
- * \author Rickard E. (Rik) Faith <[email protected]>
- * \author Gareth Hughes <[email protected]>
- */
-
-/*
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-
-#if IS_ENABLED(CONFIG_AGP)
-#include <asm/agp.h>
-#endif
-
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_print.h>
-
-#include "drm_legacy.h"
-
-#if IS_ENABLED(CONFIG_AGP)
-
-/*
- * Get AGP information.
- *
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device has been initialized and acquired and fills in the
- * drm_agp_info structure with the information in drm_agp_head::agp_info.
- */
-int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info)
-{
- struct agp_kern_info *kern;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
-
- kern = &dev->agp->agp_info;
- info->agp_version_major = kern->version.major;
- info->agp_version_minor = kern->version.minor;
- info->mode = kern->mode;
- info->aperture_base = kern->aper_base;
- info->aperture_size = kern->aper_size * 1024 * 1024;
- info->memory_allowed = kern->max_memory << PAGE_SHIFT;
- info->memory_used = kern->current_memory << PAGE_SHIFT;
- info->id_vendor = kern->device->vendor;
- info->id_device = kern->device->device;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_info);
-
-int drm_legacy_agp_info_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_info *info = data;
- int err;
-
- err = drm_legacy_agp_info(dev, info);
- if (err)
- return err;
-
- return 0;
-}
-
-/*
- * Acquire the AGP device.
- *
- * \param dev DRM device that is to acquire AGP.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device hasn't been acquired before and calls
- * \c agp_backend_acquire.
- */
-int drm_legacy_agp_acquire(struct drm_device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
-
- if (!dev->agp)
- return -ENODEV;
- if (dev->agp->acquired)
- return -EBUSY;
- dev->agp->bridge = agp_backend_acquire(pdev);
- if (!dev->agp->bridge)
- return -ENODEV;
- dev->agp->acquired = 1;
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_acquire);
-
-/*
- * Acquire the AGP device (ioctl).
- *
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device hasn't been acquired before and calls
- * \c agp_backend_acquire.
- */
-int drm_legacy_agp_acquire_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return drm_legacy_agp_acquire((struct drm_device *)file_priv->minor->dev);
-}
-
-/*
- * Release the AGP device.
- *
- * \param dev DRM device that is to release AGP.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device has been acquired and calls \c agp_backend_release.
- */
-int drm_legacy_agp_release(struct drm_device *dev)
-{
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- agp_backend_release(dev->agp->bridge);
- dev->agp->acquired = 0;
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_release);
-
-int drm_legacy_agp_release_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return drm_legacy_agp_release(dev);
-}
-
-/*
- * Enable the AGP bus.
- *
- * \param dev DRM device that has previously acquired AGP.
- * \param mode Requested AGP mode.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device has been acquired but not enabled, and calls
- * \c agp_enable.
- */
-int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode)
-{
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
-
- dev->agp->mode = mode.mode;
- agp_enable(dev->agp->bridge, mode.mode);
- dev->agp->enabled = 1;
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_enable);
-
-int drm_legacy_agp_enable_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_mode *mode = data;
-
- return drm_legacy_agp_enable(dev, *mode);
-}
-
-/*
- * Allocate AGP memory.
- *
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and has been acquired, allocates the
- * memory via agp_allocate_memory() and creates a drm_agp_mem entry for it.
- */
-int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
-{
- struct drm_agp_mem *entry;
- struct agp_memory *memory;
- unsigned long pages;
- u32 type;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- pages = DIV_ROUND_UP(request->size, PAGE_SIZE);
- type = (u32) request->type;
- memory = agp_allocate_memory(dev->agp->bridge, pages, type);
- if (!memory) {
- kfree(entry);
- return -ENOMEM;
- }
-
- entry->handle = (unsigned long)memory->key + 1;
- entry->memory = memory;
- entry->bound = 0;
- entry->pages = pages;
- list_add(&entry->head, &dev->agp->memory);
-
- request->handle = entry->handle;
- request->physical = memory->physical;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_alloc);
-
-
-int drm_legacy_agp_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_buffer *request = data;
-
- return drm_legacy_agp_alloc(dev, request);
-}
-
-/*
- * Search for the AGP memory entry associated with a handle.
- *
- * \param dev DRM device structure.
- * \param handle AGP memory handle.
- * \return pointer to the drm_agp_mem structure associated with \p handle.
- *
- * Walks through drm_agp_head::memory until finding a matching handle.
- */
-static struct drm_agp_mem *drm_legacy_agp_lookup_entry(struct drm_device *dev,
- unsigned long handle)
-{
- struct drm_agp_mem *entry;
-
- list_for_each_entry(entry, &dev->agp->memory, head) {
- if (entry->handle == handle)
- return entry;
- }
- return NULL;
-}
-
-/*
- * Unbind AGP memory from the GATT (ioctl).
- *
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and acquired, looks-up the AGP memory
- * entry and passes it to the unbind_agp() function.
- */
-int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
-{
- struct drm_agp_mem *entry;
- int ret;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- entry = drm_legacy_agp_lookup_entry(dev, request->handle);
- if (!entry || !entry->bound)
- return -EINVAL;
- ret = agp_unbind_memory(entry->memory);
- if (ret == 0)
- entry->bound = 0;
- return ret;
-}
-EXPORT_SYMBOL(drm_legacy_agp_unbind);
-
-
-int drm_legacy_agp_unbind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_binding *request = data;
-
- return drm_legacy_agp_unbind(dev, request);
-}
-
-/*
- * Bind AGP memory into the GATT (ioctl)
- *
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and has been acquired and that no memory
- * is currently bound into the GATT. Looks-up the AGP memory entry and passes
- * it to bind_agp() function.
- */
-int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
-{
- struct drm_agp_mem *entry;
- int retcode;
- int page;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- entry = drm_legacy_agp_lookup_entry(dev, request->handle);
- if (!entry || entry->bound)
- return -EINVAL;
- page = DIV_ROUND_UP(request->offset, PAGE_SIZE);
- retcode = agp_bind_memory(entry->memory, page);
- if (retcode)
- return retcode;
- entry->bound = dev->agp->base + (page << PAGE_SHIFT);
- DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
- dev->agp->base, entry->bound);
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_bind);
-
-
-int drm_legacy_agp_bind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_binding *request = data;
-
- return drm_legacy_agp_bind(dev, request);
-}
-
-/*
- * Free AGP memory (ioctl).
- *
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and has been acquired and looks up the
- * AGP memory entry. If the memory is currently bound, unbinds it via
- * unbind_agp(), frees it via free_agp() along with the entry itself, and
- * unlinks it from the doubly linked list it's inserted in.
- */
-int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
-{
- struct drm_agp_mem *entry;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- entry = drm_legacy_agp_lookup_entry(dev, request->handle);
- if (!entry)
- return -EINVAL;
- if (entry->bound)
- agp_unbind_memory(entry->memory);
-
- list_del(&entry->head);
-
- agp_free_memory(entry->memory);
- kfree(entry);
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_free);
-
-
-int drm_legacy_agp_free_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_buffer *request = data;
-
- return drm_legacy_agp_free(dev, request);
-}
-
-/*
- * Initialize the AGP resources.
- *
- * \return pointer to a drm_agp_head structure.
- *
- * Gets the drm_agp_t structure which is made available by the agpgart module
- * via the inter_module_* functions. Creates and initializes a drm_agp_head
- * structure.
- *
- * Note that final cleanup of the kmalloced structure is directly done in
- * drm_pci_agp_destroy.
- */
-struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- struct drm_agp_head *head = NULL;
-
- head = kzalloc(sizeof(*head), GFP_KERNEL);
- if (!head)
- return NULL;
- head->bridge = agp_find_bridge(pdev);
- if (!head->bridge) {
- head->bridge = agp_backend_acquire(pdev);
- if (!head->bridge) {
- kfree(head);
- return NULL;
- }
- agp_copy_info(head->bridge, &head->agp_info);
- agp_backend_release(head->bridge);
- } else {
- agp_copy_info(head->bridge, &head->agp_info);
- }
- if (head->agp_info.chipset == NOT_SUPPORTED) {
- kfree(head);
- return NULL;
- }
- INIT_LIST_HEAD(&head->memory);
- head->cant_use_aperture = head->agp_info.cant_use_aperture;
- head->page_mask = head->agp_info.page_mask;
- head->base = head->agp_info.aper_base;
- return head;
-}
-/* Only exported for i810.ko */
-EXPORT_SYMBOL(drm_legacy_agp_init);
-
-/**
- * drm_legacy_agp_clear - Clear AGP resource list
- * @dev: DRM device
- *
- * Iterate over all AGP resources and remove them. But keep the AGP head
- * intact so it can still be used. It is safe to call this if AGP is disabled or
- * was already removed.
- *
- * Cleanup is only done for drivers who have DRIVER_LEGACY set.
- */
-void drm_legacy_agp_clear(struct drm_device *dev)
-{
- struct drm_agp_mem *entry, *tempe;
-
- if (!dev->agp)
- return;
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
- if (entry->bound)
- agp_unbind_memory(entry->memory);
- agp_free_memory(entry->memory);
- kfree(entry);
- }
- INIT_LIST_HEAD(&dev->agp->memory);
-
- if (dev->agp->acquired)
- drm_legacy_agp_release(dev);
-
- dev->agp->acquired = 0;
- dev->agp->enabled = 0;
-}
-
-#endif
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index f1a503aafe5a..c31fc0b48c31 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1773,6 +1773,7 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
struct drm_crtc *crtc;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
+ struct drm_private_obj *obj;
if (!drm_drv_uses_atomic_modeset(dev))
return;
@@ -1801,6 +1802,14 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
if (take_locks)
drm_modeset_unlock(&dev->mode_config.connection_mutex);
drm_connector_list_iter_end(&conn_iter);
+
+ list_for_each_entry(obj, &config->privobj_list, head) {
+ if (take_locks)
+ drm_modeset_lock(&obj->lock, NULL);
+ drm_atomic_private_obj_print_state(p, obj->state);
+ if (take_locks)
+ drm_modeset_unlock(&obj->lock);
+ }
}
/**
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index c3f677130def..a920fbae714c 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -2012,7 +2012,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
return ret;
drm_atomic_helper_async_commit(dev, state);
- drm_atomic_helper_cleanup_planes(dev, state);
+ drm_atomic_helper_unprepare_planes(dev, state);
return 0;
}
@@ -2072,7 +2072,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
return 0;
err:
- drm_atomic_helper_cleanup_planes(dev, state);
+ drm_atomic_helper_unprepare_planes(dev, state);
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit);
@@ -2650,6 +2650,39 @@ fail_prepare_fb:
}
EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
+/**
+ * drm_atomic_helper_unprepare_planes - release plane resources on aborts
+ * @dev: DRM device
+ * @state: atomic state object with old state structures
+ *
+ * This function cleans up plane state, specifically framebuffers, from the
+ * atomic state. It undoes the effects of drm_atomic_helper_prepare_planes()
+ * when aborting an atomic commit. For cleaning up after a successful commit
+ * use drm_atomic_helper_cleanup_planes().
+ */
+void drm_atomic_helper_unprepare_planes(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state;
+ int i;
+
+ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+ const struct drm_plane_helper_funcs *funcs = plane->helper_private;
+
+ if (funcs->end_fb_access)
+ funcs->end_fb_access(plane, new_plane_state);
+ }
+
+ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+ const struct drm_plane_helper_funcs *funcs = plane->helper_private;
+
+ if (funcs->cleanup_fb)
+ funcs->cleanup_fb(plane, new_plane_state);
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_unprepare_planes);
+
static bool plane_crtc_active(const struct drm_plane_state *state)
{
return state->crtc && state->crtc->state->active;
@@ -2784,6 +2817,17 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
funcs->atomic_flush(crtc, old_state);
}
+
+ /*
+ * Signal end of framebuffer access here before hw_done. After hw_done,
+ * a later commit might have already released the plane state.
+ */
+ for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
+ const struct drm_plane_helper_funcs *funcs = plane->helper_private;
+
+ if (funcs->end_fb_access)
+ funcs->end_fb_access(plane, old_plane_state);
+ }
}
EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
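end_fb_access is now signalled from drm_atomic_helper_commit_planes(), before hw_done, because once hw_done fires a later commit may free the old plane state. Drivers opt in through drm_plane_helper_funcs; a hedged sketch of the begin/end pairing, with hypothetical demo_* helpers:

	/* Sketch: pairing begin_fb_access/end_fb_access in a plane helper. */
	static int demo_plane_begin_fb_access(struct drm_plane *plane,
					      struct drm_plane_state *new_state)
	{
		/* e.g. take a short-term mapping or pin for scanout setup */
		return demo_map_fb(new_state->fb);	/* hypothetical */
	}

	static void demo_plane_end_fb_access(struct drm_plane *plane,
					     struct drm_plane_state *state)
	{
		/* runs before hw_done: the state may be freed afterwards */
		demo_unmap_fb(state->fb);		/* hypothetical */
	}

	static const struct drm_plane_helper_funcs demo_plane_helper_funcs = {
		.begin_fb_access = demo_plane_begin_fb_access,
		.end_fb_access = demo_plane_end_fb_access,
	};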
@@ -2911,40 +2955,22 @@ EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
 * configuration. Hence the old configuration must be preserved in @old_state to
* be able to call this function.
*
- * This function must also be called on the new state when the atomic update
- * fails at any point after calling drm_atomic_helper_prepare_planes().
+ * This function must not be called on the new state when the atomic update
+ * fails at any point after calling drm_atomic_helper_prepare_planes(). Use
+ * drm_atomic_helper_unprepare_planes() in this case.
*/
void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_plane *plane;
- struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct drm_plane_state *old_plane_state;
int i;
- for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
+ for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
const struct drm_plane_helper_funcs *funcs = plane->helper_private;
- if (funcs->end_fb_access)
- funcs->end_fb_access(plane, new_plane_state);
- }
-
- for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
- const struct drm_plane_helper_funcs *funcs;
- struct drm_plane_state *plane_state;
-
- /*
- * This might be called before swapping when commit is aborted,
- * in which case we have to cleanup the new state.
- */
- if (old_plane_state == plane->state)
- plane_state = new_plane_state;
- else
- plane_state = old_plane_state;
-
- funcs = plane->helper_private;
-
if (funcs->cleanup_fb)
- funcs->cleanup_fb(plane, plane_state);
+ funcs->cleanup_fb(plane, old_plane_state);
}
}
EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
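The resulting rule: drm_atomic_helper_prepare_planes() is undone by drm_atomic_helper_unprepare_planes() on the new state when a commit aborts, and by drm_atomic_helper_cleanup_planes() on the old state after a successful commit. A condensed sketch of a hand-rolled commit path under that rule (error handling abbreviated, demo_commit_tail() hypothetical):

	/* Sketch: prepare/unprepare/cleanup pairing in a custom commit. */
	static int demo_commit(struct drm_device *dev,
			       struct drm_atomic_state *state)
	{
		int ret;

		ret = drm_atomic_helper_prepare_planes(dev, state);
		if (ret)
			return ret;

		ret = drm_atomic_helper_swap_state(state, true);
		if (ret) {
			/* abort: state was NOT swapped, release the new state */
			drm_atomic_helper_unprepare_planes(dev, state);
			return ret;
		}

		demo_commit_tail(dev, state);	/* hypothetical hw programming */

		/* success: state was swapped, clean up the old state */
		drm_atomic_helper_cleanup_planes(dev, state);
		return 0;
	}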
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 784e63d70a42..54975de44a0e 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -275,6 +275,20 @@ void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *plane_state,
plane_state->normalized_zpos = val;
}
}
+
+ if (plane->hotspot_x_property) {
+ if (!drm_object_property_get_default_value(&plane->base,
+ plane->hotspot_x_property,
+ &val))
+ plane_state->hotspot_x = val;
+ }
+
+ if (plane->hotspot_y_property) {
+ if (!drm_object_property_get_default_value(&plane->base,
+ plane->hotspot_y_property,
+ &val))
+ plane_state->hotspot_y = val;
+ }
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_state_reset);
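With the reset helper seeding hotspot_x/hotspot_y from the property defaults, a paravirtualized driver can read the values straight from the plane state when programming its cursor. A hedged sketch of the consuming side, with hypothetical demo_* names:

	/* Sketch: consuming cursor hotspot state in a hypothetical driver. */
	static void demo_update_cursor(struct drm_plane *plane,
				       struct drm_plane_state *state)
	{
		s32 hot_x = state->hotspot_x;	/* set via the hotspot properties */
		s32 hot_y = state->hotspot_y;

		demo_hw_set_cursor(plane, state->fb, hot_x, hot_y);
	}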
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 98d3b10c08ae..aee4a65d4959 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -593,6 +593,22 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
} else if (plane->funcs->atomic_set_property) {
return plane->funcs->atomic_set_property(plane, state,
property, val);
+ } else if (property == plane->hotspot_x_property) {
+ if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+ drm_dbg_atomic(plane->dev,
+ "[PLANE:%d:%s] is not a cursor plane: 0x%llx\n",
+ plane->base.id, plane->name, val);
+ return -EINVAL;
+ }
+ state->hotspot_x = val;
+ } else if (property == plane->hotspot_y_property) {
+ if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+ drm_dbg_atomic(plane->dev,
+ "[PLANE:%d:%s] is not a cursor plane: 0x%llx\n",
+ plane->base.id, plane->name, val);
+ return -EINVAL;
+ }
+ state->hotspot_y = val;
} else {
drm_dbg_atomic(plane->dev,
"[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
@@ -653,6 +669,10 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
*val = state->scaling_filter;
} else if (plane->funcs->atomic_get_property) {
return plane->funcs->atomic_get_property(plane, state, property, val);
+ } else if (property == plane->hotspot_x_property) {
+ *val = state->hotspot_x;
+ } else if (property == plane->hotspot_y_property) {
+ *val = state->hotspot_y;
} else {
drm_dbg_atomic(dev,
"[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
@@ -1006,13 +1026,28 @@ out:
return ret;
}
+static int drm_atomic_check_prop_changes(int ret, uint64_t old_val, uint64_t prop_value,
+ struct drm_property *prop)
+{
+ if (ret != 0 || old_val != prop_value) {
+ drm_dbg_atomic(prop->dev,
+ "[PROP:%d:%s] No prop can be changed during async flip\n",
+ prop->base.id, prop->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int drm_atomic_set_property(struct drm_atomic_state *state,
struct drm_file *file_priv,
struct drm_mode_object *obj,
struct drm_property *prop,
- uint64_t prop_value)
+ u64 prop_value,
+ bool async_flip)
{
struct drm_mode_object *ref;
+ u64 old_val;
int ret;
if (!drm_property_change_valid_get(prop, prop_value, &ref))
@@ -1029,6 +1064,13 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
break;
}
+ if (async_flip) {
+ ret = drm_atomic_connector_get_property(connector, connector_state,
+ prop, &old_val);
+ ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
+ break;
+ }
+
ret = drm_atomic_connector_set_property(connector,
connector_state, file_priv,
prop, prop_value);
@@ -1044,6 +1086,13 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
break;
}
+ if (async_flip) {
+ ret = drm_atomic_crtc_get_property(crtc, crtc_state,
+ prop, &old_val);
+ ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
+ break;
+ }
+
ret = drm_atomic_crtc_set_property(crtc,
crtc_state, prop, prop_value);
break;
@@ -1051,6 +1100,7 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
case DRM_MODE_OBJECT_PLANE: {
struct drm_plane *plane = obj_to_plane(obj);
struct drm_plane_state *plane_state;
+ struct drm_mode_config *config = &plane->dev->mode_config;
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
@@ -1058,6 +1108,21 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
break;
}
+ if (async_flip && prop != config->prop_fb_id) {
+ ret = drm_atomic_plane_get_property(plane, plane_state,
+ prop, &old_val);
+ ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
+ break;
+ }
+
+ if (async_flip && plane_state->plane->type != DRM_PLANE_TYPE_PRIMARY) {
+ drm_dbg_atomic(prop->dev,
+ "[OBJECT:%d] Only primary planes can be changed during async flip\n",
+ obj->id);
+ ret = -EINVAL;
+ break;
+ }
+
ret = drm_atomic_plane_set_property(plane,
plane_state, file_priv,
prop, prop_value);
@@ -1323,6 +1388,18 @@ static void complete_signaling(struct drm_device *dev,
kfree(fence_state);
}
+static void
+set_async_flip(struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ crtc_state->async_flip = true;
+ }
+}
+
int drm_mode_atomic_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -1337,6 +1414,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
struct drm_out_fence_state *fence_state;
int ret = 0;
unsigned int i, j, num_fences;
+ bool async_flip = false;
/* disallow for drivers not supporting atomic: */
if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@@ -1363,9 +1441,13 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
}
if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) {
- drm_dbg_atomic(dev,
- "commit failed: invalid flag DRM_MODE_PAGE_FLIP_ASYNC\n");
- return -EINVAL;
+ if (!dev->mode_config.async_page_flip) {
+ drm_dbg_atomic(dev,
+ "commit failed: DRM_MODE_PAGE_FLIP_ASYNC not supported\n");
+ return -EINVAL;
+ }
+
+ async_flip = true;
}
/* can't test and expect an event at the same time. */
@@ -1450,8 +1532,8 @@ retry:
goto out;
}
- ret = drm_atomic_set_property(state, file_priv,
- obj, prop, prop_value);
+ ret = drm_atomic_set_property(state, file_priv, obj,
+ prop, prop_value, async_flip);
if (ret) {
drm_mode_object_put(obj);
goto out;
@@ -1468,6 +1550,9 @@ retry:
if (ret)
goto out;
+ if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC)
+ set_async_flip(state);
+
if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
ret = drm_atomic_check_only(state);
} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
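The net effect for userspace: atomic commits may now carry DRM_MODE_PAGE_FLIP_ASYNC when the driver advertises async_page_flip, but the only property allowed to change is FB_ID, and only on primary planes. A hedged libdrm-style sketch of such a flip (fd, plane_id, fb_id_prop and new_fb_id assumed already looked up):

	/* Sketch: async atomic page flip from userspace via libdrm. */
	#include <xf86drmMode.h>

	drmModeAtomicReq *req = drmModeAtomicAlloc();

	/* Only FB_ID on a primary plane may change in an async flip. */
	drmModeAtomicAddProperty(req, plane_id, fb_id_prop, new_fb_id);

	ret = drmModeAtomicCommit(fd, req,
				  DRM_MODE_PAGE_FLIP_ASYNC |
				  DRM_MODE_ATOMIC_NONBLOCK,
				  NULL);
	drmModeAtomicFree(req);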
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 2ed2585ded37..252c105d614f 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -37,13 +37,12 @@
#include <drm/drm_print.h>
#include "drm_internal.h"
-#include "drm_legacy.h"
/**
* DOC: master and authentication
*
* &struct drm_master is used to track groups of clients with open
- * primary/legacy device nodes. For every &struct drm_file which has had at
+ * primary device nodes. For every &struct drm_file which has at
 * least once successfully become the device master (either through the
 * SET_MASTER IOCTL, or implicitly through opening the primary device node when
 * no one else is the current master at that time) there exists one &drm_master.
@@ -139,7 +138,6 @@ struct drm_master *drm_master_create(struct drm_device *dev)
return NULL;
kref_init(&master->refcount);
- drm_master_legacy_init(master);
idr_init_base(&master->magic_map, 1);
master->dev = dev;
@@ -365,8 +363,6 @@ void drm_master_release(struct drm_file *file_priv)
if (!drm_is_current_master_locked(file_priv))
goto out;
- drm_legacy_lock_master_cleanup(dev, master);
-
if (dev->master == file_priv->master)
drm_drop_master(dev, file_priv);
out:
@@ -429,8 +425,6 @@ static void drm_master_destroy(struct kref *kref)
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_lease_destroy(master);
- drm_legacy_master_rmmaps(dev, master);
-
idr_destroy(&master->magic_map);
idr_destroy(&master->leases);
idr_destroy(&master->lessee_idr);
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 30d66bee0ec6..cee3188adf3d 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -1347,50 +1347,6 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np)
EXPORT_SYMBOL(of_drm_find_bridge);
#endif
-#ifdef CONFIG_DEBUG_FS
-static int drm_bridge_chains_info(struct seq_file *m, void *data)
-{
- struct drm_debugfs_entry *entry = m->private;
- struct drm_device *dev = entry->dev;
- struct drm_printer p = drm_seq_file_printer(m);
- struct drm_mode_config *config = &dev->mode_config;
- struct drm_encoder *encoder;
- unsigned int bridge_idx = 0;
-
- list_for_each_entry(encoder, &config->encoder_list, head) {
- struct drm_bridge *bridge;
-
- drm_printf(&p, "encoder[%u]\n", encoder->base.id);
-
- drm_for_each_bridge_in_chain(encoder, bridge) {
- drm_printf(&p, "\tbridge[%u] type: %u, ops: %#x",
- bridge_idx, bridge->type, bridge->ops);
-
-#ifdef CONFIG_OF
- if (bridge->of_node)
- drm_printf(&p, ", OF: %pOFfc", bridge->of_node);
-#endif
-
- drm_printf(&p, "\n");
-
- bridge_idx++;
- }
- }
-
- return 0;
-}
-
-static const struct drm_debugfs_info drm_bridge_debugfs_list[] = {
- { "bridge_chains", drm_bridge_chains_info, 0 },
-};
-
-void drm_bridge_debugfs_init(struct drm_device *dev)
-{
- drm_debugfs_add_files(dev, drm_bridge_debugfs_list,
- ARRAY_SIZE(drm_bridge_debugfs_list));
-}
-#endif
-
MODULE_AUTHOR("Ajay Kumar <[email protected]>");
MODULE_DESCRIPTION("DRM bridge infrastructure");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c
index 8239ad43aed5..3acd67021ec6 100644
--- a/drivers/gpu/drm/drm_bridge_connector.c
+++ b/drivers/gpu/drm/drm_bridge_connector.c
@@ -198,12 +198,6 @@ static void drm_bridge_connector_destroy(struct drm_connector *connector)
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
- if (bridge_connector->bridge_hpd) {
- struct drm_bridge *hpd = bridge_connector->bridge_hpd;
-
- drm_bridge_hpd_disable(hpd);
- }
-
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
deleted file mode 100644
index 86700560fea2..000000000000
--- a/drivers/gpu/drm/drm_bufs.c
+++ /dev/null
@@ -1,1627 +0,0 @@
-/*
- * Legacy: Generic DRM Buffer Management
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Author: Rickard E. (Rik) Faith <[email protected]>
- * Author: Gareth Hughes <[email protected]>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/export.h>
-#include <linux/log2.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/nospec.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-
-#include <asm/shmparam.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_print.h>
-
-#include "drm_legacy.h"
-
-
-static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
- struct drm_local_map *map)
-{
- struct drm_map_list *entry;
-
- list_for_each_entry(entry, &dev->maplist, head) {
- /*
- * Because the kernel-userspace ABI is fixed at a 32-bit offset
- * while PCI resources may live above that, we only compare the
- * lower 32 bits of the map offset for maps of type
- * _DRM_FRAME_BUFFER or _DRM_REGISTERS.
- * It is assumed that if a driver has more than one resource
- * of each type, the lower 32 bits are different.
- */
- if (!entry->map ||
- map->type != entry->map->type ||
- entry->master != dev->master)
- continue;
- switch (map->type) {
- case _DRM_SHM:
- if (map->flags != _DRM_CONTAINS_LOCK)
- break;
- return entry;
- case _DRM_REGISTERS:
- case _DRM_FRAME_BUFFER:
- if ((entry->map->offset & 0xffffffff) ==
- (map->offset & 0xffffffff))
- return entry;
- break;
- default: /* Make gcc happy */
- break;
- }
- if (entry->map->offset == map->offset)
- return entry;
- }
-
- return NULL;
-}
-
-static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
- unsigned long user_token, int hashed_handle, int shm)
-{
- int use_hashed_handle, shift;
- unsigned long add;
-
-#if (BITS_PER_LONG == 64)
- use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
-#elif (BITS_PER_LONG == 32)
- use_hashed_handle = hashed_handle;
-#else
-#error Unsupported long size. Neither 64 nor 32 bits.
-#endif
-
- if (!use_hashed_handle) {
- int ret;
-
- hash->key = user_token >> PAGE_SHIFT;
- ret = drm_ht_insert_item(&dev->map_hash, hash);
- if (ret != -EINVAL)
- return ret;
- }
-
- shift = 0;
- add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
- if (shm && (SHMLBA > PAGE_SIZE)) {
- int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
-
- /* For shared memory, we have to preserve the SHMLBA
- * bits of the eventual vma->vm_pgoff value during
- * mmap(). Otherwise we run into cache aliasing problems
- * on some platforms. On these platforms, the pgoff of
- * a mmap() request is used to pick a suitable virtual
- * address for the mmap() region such that it will not
- * cause cache aliasing problems.
- *
- * Therefore, make sure the SHMLBA relevant bits of the
- * hash value we use are equal to those in the original
- * kernel virtual address.
- */
- shift = bits;
- add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
- }
-
- return drm_ht_just_insert_please(&dev->map_hash, hash,
- user_token, 32 - PAGE_SHIFT - 3,
- shift, add);
-}
-
-/*
- * Core function to create a range of memory available for mapping by a
- * non-root process.
- *
- * Adjusts the memory offset to its absolute value according to the mapping
- * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
- * applicable and if supported by the kernel.
- */
-static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
- unsigned int size, enum drm_map_type type,
- enum drm_map_flags flags,
- struct drm_map_list **maplist)
-{
- struct drm_local_map *map;
- struct drm_map_list *list;
- unsigned long user_token;
- int ret;
-
- map = kmalloc(sizeof(*map), GFP_KERNEL);
- if (!map)
- return -ENOMEM;
-
- map->offset = offset;
- map->size = size;
- map->flags = flags;
- map->type = type;
-
- /* Only allow shared memory to be removable since we only keep enough
- * book keeping information about shared memory to allow for removal
- * when processes fork.
- */
- if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
- kfree(map);
- return -EINVAL;
- }
- DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
- (unsigned long long)map->offset, map->size, map->type);
-
- /* page-align _DRM_SHM maps. They are allocated here so there is no security
- * hole created by that and it works around various broken drivers that use
- * a non-aligned quantity to map the SAREA. --BenH
- */
- if (map->type == _DRM_SHM)
- map->size = PAGE_ALIGN(map->size);
-
- if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
- kfree(map);
- return -EINVAL;
- }
- map->mtrr = -1;
- map->handle = NULL;
-
- switch (map->type) {
- case _DRM_REGISTERS:
- case _DRM_FRAME_BUFFER:
-#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
- if (map->offset + (map->size-1) < map->offset ||
- map->offset < virt_to_phys(high_memory)) {
- kfree(map);
- return -EINVAL;
- }
-#endif
- /* Some drivers preinitialize some maps, without the X Server
- * needing to be aware of it. Therefore, we just return success
- * when the server tries to create a duplicate map.
- */
- list = drm_find_matching_map(dev, map);
- if (list != NULL) {
- if (list->map->size != map->size) {
- DRM_DEBUG("Matching maps of type %d with "
- "mismatched sizes, (%ld vs %ld)\n",
- map->type, map->size,
- list->map->size);
- list->map->size = map->size;
- }
-
- kfree(map);
- *maplist = list;
- return 0;
- }
-
- if (map->type == _DRM_FRAME_BUFFER ||
- (map->flags & _DRM_WRITE_COMBINING)) {
- map->mtrr =
- arch_phys_wc_add(map->offset, map->size);
- }
- if (map->type == _DRM_REGISTERS) {
- if (map->flags & _DRM_WRITE_COMBINING)
- map->handle = ioremap_wc(map->offset,
- map->size);
- else
- map->handle = ioremap(map->offset, map->size);
- if (!map->handle) {
- kfree(map);
- return -ENOMEM;
- }
- }
-
- break;
- case _DRM_SHM:
- list = drm_find_matching_map(dev, map);
- if (list != NULL) {
- if (list->map->size != map->size) {
- DRM_DEBUG("Matching maps of type %d with "
- "mismatched sizes, (%ld vs %ld)\n",
- map->type, map->size, list->map->size);
- list->map->size = map->size;
- }
-
- kfree(map);
- *maplist = list;
- return 0;
- }
- map->handle = vmalloc_user(map->size);
- DRM_DEBUG("%lu %d %p\n",
- map->size, order_base_2(map->size), map->handle);
- if (!map->handle) {
- kfree(map);
- return -ENOMEM;
- }
- map->offset = (unsigned long)map->handle;
- if (map->flags & _DRM_CONTAINS_LOCK) {
- /* Prevent a 2nd X Server from creating a 2nd lock */
- if (dev->master->lock.hw_lock != NULL) {
- vfree(map->handle);
- kfree(map);
- return -EBUSY;
- }
- dev->sigdata.lock = dev->master->lock.hw_lock = map->handle; /* Pointer to lock */
- }
- break;
- case _DRM_AGP: {
- struct drm_agp_mem *entry;
- int valid = 0;
-
- if (!dev->agp) {
- kfree(map);
- return -EINVAL;
- }
-#ifdef __alpha__
- map->offset += dev->hose->mem_space->start;
-#endif
- /* In some cases (i810 driver), user space may have already
- * added the AGP base itself, because dev->agp->base previously
- * only got set during AGP enable. So, only add the base
- * address if the map's offset isn't already within the
- * aperture.
- */
- if (map->offset < dev->agp->base ||
- map->offset > dev->agp->base +
- dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
- map->offset += dev->agp->base;
- }
- map->mtrr = dev->agp->agp_mtrr; /* for getmap */
-
- /* This assumes the DRM is in total control of AGP space.
- * It's not always the case as AGP can be in the control
- * of user space (i.e. i810 driver). So this loop will get
- * skipped and we double check that dev->agp->memory is
- * actually set as well as being invalid before EPERM'ing
- */
- list_for_each_entry(entry, &dev->agp->memory, head) {
- if ((map->offset >= entry->bound) &&
- (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
- valid = 1;
- break;
- }
- }
- if (!list_empty(&dev->agp->memory) && !valid) {
- kfree(map);
- return -EPERM;
- }
- DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
- (unsigned long long)map->offset, map->size);
-
- break;
- }
- case _DRM_SCATTER_GATHER:
- if (!dev->sg) {
- kfree(map);
- return -EINVAL;
- }
- map->offset += (unsigned long)dev->sg->virtual;
- break;
- case _DRM_CONSISTENT:
- /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
- * As we're limiting the address to 2^32-1 (or less),
- * casting it down to 32 bits is no problem, but we
- * need to point to a 64bit variable first.
- */
- map->handle = dma_alloc_coherent(dev->dev,
- map->size,
- &map->offset,
- GFP_KERNEL);
- if (!map->handle) {
- kfree(map);
- return -ENOMEM;
- }
- break;
- default:
- kfree(map);
- return -EINVAL;
- }
-
- list = kzalloc(sizeof(*list), GFP_KERNEL);
- if (!list) {
- if (map->type == _DRM_REGISTERS)
- iounmap(map->handle);
- kfree(map);
- return -EINVAL;
- }
- list->map = map;
-
- mutex_lock(&dev->struct_mutex);
- list_add(&list->head, &dev->maplist);
-
- /* Assign a 32-bit handle */
- /* We do it here so that dev->struct_mutex protects the increment */
- user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
- map->offset;
- ret = drm_map_handle(dev, &list->hash, user_token, 0,
- (map->type == _DRM_SHM));
- if (ret) {
- if (map->type == _DRM_REGISTERS)
- iounmap(map->handle);
- kfree(map);
- kfree(list);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
-
- list->user_token = list->hash.key << PAGE_SHIFT;
- mutex_unlock(&dev->struct_mutex);
-
- if (!(map->flags & _DRM_DRIVER))
- list->master = dev->master;
- *maplist = list;
- return 0;
-}
-
-int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
- unsigned int size, enum drm_map_type type,
- enum drm_map_flags flags, struct drm_local_map **map_ptr)
-{
- struct drm_map_list *list;
- int rc;
-
- rc = drm_addmap_core(dev, offset, size, type, flags, &list);
- if (!rc)
- *map_ptr = list->map;
- return rc;
-}
-EXPORT_SYMBOL(drm_legacy_addmap);
-
-struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
- unsigned int token)
-{
- struct drm_map_list *_entry;
-
- list_for_each_entry(_entry, &dev->maplist, head)
- if (_entry->user_token == token)
- return _entry->map;
- return NULL;
-}
-EXPORT_SYMBOL(drm_legacy_findmap);
-
-/*
- * Ioctl to specify a range of memory that is available for mapping by a
- * non-root process.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_map structure.
- * \return zero on success or a negative value on error.
- *
- */
-int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_map *map = data;
- struct drm_map_list *maplist;
- int err;
-
- if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
- return -EPERM;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- err = drm_addmap_core(dev, map->offset, map->size, map->type,
- map->flags, &maplist);
-
- if (err)
- return err;
-
- /* avoid a warning on 64-bit; this cast isn't nice, but the API was set long ago, so it's too late to change */
- map->handle = (void *)(unsigned long)maplist->user_token;
-
- /*
- * It appears that there are no users of this value whatsoever --
- * drmAddMap just discards it. Let's not encourage its use.
- * (Keeping drm_addmap_core's returned mtrr value would be wrong --
- * it's not a real mtrr index anymore.)
- */
- map->mtrr = -1;
-
- return 0;
-}
-
-/*
- * Get a mapping information.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_map structure.
- *
- * \return zero on success or a negative number on failure.
- *
- * Searches for the mapping with the specified offset and copies its information
- * into userspace
- */
-int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_map *map = data;
- struct drm_map_list *r_list = NULL;
- struct list_head *list;
- int idx;
- int i;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- idx = map->offset;
- if (idx < 0)
- return -EINVAL;
-
- i = 0;
- mutex_lock(&dev->struct_mutex);
- list_for_each(list, &dev->maplist) {
- if (i == idx) {
- r_list = list_entry(list, struct drm_map_list, head);
- break;
- }
- i++;
- }
- if (!r_list || !r_list->map) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- map->offset = r_list->map->offset;
- map->size = r_list->map->size;
- map->type = r_list->map->type;
- map->flags = r_list->map->flags;
- map->handle = (void *)(unsigned long) r_list->user_token;
- map->mtrr = arch_phys_wc_index(r_list->map->mtrr);
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/*
- * Remove a map private from list and deallocate resources if the mapping
- * isn't in use.
- *
- * Searches the map on drm_device::maplist, removes it from the list, sees if
- * it's being used, and frees any associated resources (such as MTRRs) if it's
- * not in use.
- *
- * \sa drm_legacy_addmap
- */
-int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
-{
- struct drm_map_list *r_list = NULL, *list_t;
- int found = 0;
- struct drm_master *master;
-
- /* Find the list entry for the map and remove it */
- list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
- if (r_list->map == map) {
- master = r_list->master;
- list_del(&r_list->head);
- drm_ht_remove_key(&dev->map_hash,
- r_list->user_token >> PAGE_SHIFT);
- kfree(r_list);
- found = 1;
- break;
- }
- }
-
- if (!found)
- return -EINVAL;
-
- switch (map->type) {
- case _DRM_REGISTERS:
- iounmap(map->handle);
- fallthrough;
- case _DRM_FRAME_BUFFER:
- arch_phys_wc_del(map->mtrr);
- break;
- case _DRM_SHM:
- vfree(map->handle);
- if (master) {
- if (dev->sigdata.lock == master->lock.hw_lock)
- dev->sigdata.lock = NULL;
- master->lock.hw_lock = NULL; /* SHM removed */
- master->lock.file_priv = NULL;
- wake_up_interruptible_all(&master->lock.lock_queue);
- }
- break;
- case _DRM_AGP:
- case _DRM_SCATTER_GATHER:
- break;
- case _DRM_CONSISTENT:
- dma_free_coherent(dev->dev,
- map->size,
- map->handle,
- map->offset);
- break;
- }
- kfree(map);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_rmmap_locked);
-
-void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
-{
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- mutex_lock(&dev->struct_mutex);
- drm_legacy_rmmap_locked(dev, map);
- mutex_unlock(&dev->struct_mutex);
-}
-EXPORT_SYMBOL(drm_legacy_rmmap);
-
-void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
-{
- struct drm_map_list *r_list, *list_temp;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
- if (r_list->master == master) {
- drm_legacy_rmmap_locked(dev, r_list->map);
- r_list = NULL;
- }
- }
- mutex_unlock(&dev->struct_mutex);
-}
-
-void drm_legacy_rmmaps(struct drm_device *dev)
-{
- struct drm_map_list *r_list, *list_temp;
-
- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
- drm_legacy_rmmap(dev, r_list->map);
-}
-
-/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
- * the last close of the device, and this is necessary for cleanup when things
- * exit uncleanly. Therefore, having userland manually remove mappings seems
- * like a pointless exercise since they're going away anyway.
- *
- * One use case might be after addmap is allowed for normal users for SHM and
- * gets used by drivers that the server doesn't need to care about. This seems
- * unlikely.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a struct drm_map structure.
- * \return zero on success or a negative value on error.
- */
-int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_map *request = data;
- struct drm_local_map *map = NULL;
- struct drm_map_list *r_list;
- int ret;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry(r_list, &dev->maplist, head) {
- if (r_list->map &&
- r_list->user_token == (unsigned long)request->handle &&
- r_list->map->flags & _DRM_REMOVABLE) {
- map = r_list->map;
- break;
- }
- }
-
- /* List has wrapped around to the head pointer, or it's empty and we
- * didn't find anything.
- */
- if (list_empty(&dev->maplist) || !map) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- /* Register and framebuffer maps are permanent */
- if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
- mutex_unlock(&dev->struct_mutex);
- return 0;
- }
-
- ret = drm_legacy_rmmap_locked(dev, map);
-
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-/*
- * Cleanup after an error on one of the addbufs() functions.
- *
- * \param dev DRM device.
- * \param entry buffer entry where the error occurred.
- *
- * Frees any pages and buffers associated with the given entry.
- */
-static void drm_cleanup_buf_error(struct drm_device *dev,
- struct drm_buf_entry *entry)
-{
- drm_dma_handle_t *dmah;
- int i;
-
- if (entry->seg_count) {
- for (i = 0; i < entry->seg_count; i++) {
- if (entry->seglist[i]) {
- dmah = entry->seglist[i];
- dma_free_coherent(dev->dev,
- dmah->size,
- dmah->vaddr,
- dmah->busaddr);
- kfree(dmah);
- }
- }
- kfree(entry->seglist);
-
- entry->seg_count = 0;
- }
-
- if (entry->buf_count) {
- for (i = 0; i < entry->buf_count; i++) {
- kfree(entry->buflist[i].dev_private);
- }
- kfree(entry->buflist);
-
- entry->buf_count = 0;
- }
-}
-
-#if IS_ENABLED(CONFIG_AGP)
-/*
- * Add AGP buffers for DMA transfers.
- *
- * \param dev struct drm_device to which the buffers are to be added.
- * \param request pointer to a struct drm_buf_desc describing the request.
- * \return zero on success or a negative number on failure.
- *
- * After some sanity checks creates a drm_buf structure for each buffer and
- * reallocates the buffer list of the same size order to accommodate the new
- * buffers.
- */
-int drm_legacy_addbufs_agp(struct drm_device *dev,
- struct drm_buf_desc *request)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_entry *entry;
- struct drm_agp_mem *agp_entry;
- struct drm_buf *buf;
- unsigned long offset;
- unsigned long agp_offset;
- int count;
- int order;
- int size;
- int alignment;
- int page_order;
- int total;
- int byte_count;
- int i, valid;
- struct drm_buf **temp_buflist;
-
- if (!dma)
- return -EINVAL;
-
- count = request->count;
- order = order_base_2(request->size);
- size = 1 << order;
-
- alignment = (request->flags & _DRM_PAGE_ALIGN)
- ? PAGE_ALIGN(size) : size;
- page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
- total = PAGE_SIZE << page_order;
-
- byte_count = 0;
- agp_offset = dev->agp->base + request->agp_start;
-
- DRM_DEBUG("count: %d\n", count);
- DRM_DEBUG("order: %d\n", order);
- DRM_DEBUG("size: %d\n", size);
- DRM_DEBUG("agp_offset: %lx\n", agp_offset);
- DRM_DEBUG("alignment: %d\n", alignment);
- DRM_DEBUG("page_order: %d\n", page_order);
- DRM_DEBUG("total: %d\n", total);
-
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
-
- /* Make sure buffers are located in AGP memory that we own */
- valid = 0;
- list_for_each_entry(agp_entry, &dev->agp->memory, head) {
- if ((agp_offset >= agp_entry->bound) &&
- (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
- valid = 1;
- break;
- }
- }
- if (!list_empty(&dev->agp->memory) && !valid) {
- DRM_DEBUG("zone invalid\n");
- return -EINVAL;
- }
- spin_lock(&dev->buf_lock);
- if (dev->buf_use) {
- spin_unlock(&dev->buf_lock);
- return -EBUSY;
- }
- atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->buf_lock);
-
- mutex_lock(&dev->struct_mutex);
- entry = &dma->bufs[order];
- if (entry->buf_count) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM; /* May only call once for each order */
- }
-
- if (count < 0 || count > 4096) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -EINVAL;
- }
-
- entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
- if (!entry->buflist) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- entry->buf_size = size;
- entry->page_order = page_order;
-
- offset = 0;
-
- while (entry->buf_count < count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
-
- buf->offset = (dma->byte_count + offset);
- buf->bus_address = agp_offset + offset;
- buf->address = (void *)(agp_offset + offset);
- buf->next = NULL;
- buf->waiting = 0;
- buf->pending = 0;
- buf->file_priv = NULL;
-
- buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
- if (!buf->dev_private) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
-
- offset += alignment;
- entry->buf_count++;
- byte_count += PAGE_SIZE << page_order;
- }
-
- DRM_DEBUG("byte_count: %d\n", byte_count);
-
- temp_buflist = krealloc(dma->buflist,
- (dma->buf_count + entry->buf_count) *
- sizeof(*dma->buflist), GFP_KERNEL);
- if (!temp_buflist) {
- /* Free the entry because it isn't valid */
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- dma->buflist = temp_buflist;
-
- for (i = 0; i < entry->buf_count; i++) {
- dma->buflist[i + dma->buf_count] = &entry->buflist[i];
- }
-
- dma->buf_count += entry->buf_count;
- dma->seg_count += entry->seg_count;
- dma->page_count += byte_count >> PAGE_SHIFT;
- dma->byte_count += byte_count;
-
- DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
- DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
-
- mutex_unlock(&dev->struct_mutex);
-
- request->count = entry->buf_count;
- request->size = size;
-
- dma->flags = _DRM_DMA_USE_AGP;
-
- atomic_dec(&dev->buf_alloc);
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_addbufs_agp);
-#endif /* CONFIG_AGP */
-
-int drm_legacy_addbufs_pci(struct drm_device *dev,
- struct drm_buf_desc *request)
-{
- struct drm_device_dma *dma = dev->dma;
- int count;
- int order;
- int size;
- int total;
- int page_order;
- struct drm_buf_entry *entry;
- drm_dma_handle_t *dmah;
- struct drm_buf *buf;
- int alignment;
- unsigned long offset;
- int i;
- int byte_count;
- int page_count;
- unsigned long *temp_pagelist;
- struct drm_buf **temp_buflist;
-
- if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
- return -EOPNOTSUPP;
-
- if (!dma)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- count = request->count;
- order = order_base_2(request->size);
- size = 1 << order;
-
- DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
- request->count, request->size, size, order);
-
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
-
- alignment = (request->flags & _DRM_PAGE_ALIGN)
- ? PAGE_ALIGN(size) : size;
- page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
- total = PAGE_SIZE << page_order;
-
- spin_lock(&dev->buf_lock);
- if (dev->buf_use) {
- spin_unlock(&dev->buf_lock);
- return -EBUSY;
- }
- atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->buf_lock);
-
- mutex_lock(&dev->struct_mutex);
- entry = &dma->bufs[order];
- if (entry->buf_count) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM; /* May only be called once for each order */
- }
-
- if (count < 0 || count > 4096) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -EINVAL;
- }
-
- entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
- if (!entry->buflist) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
- if (!entry->seglist) {
- kfree(entry->buflist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- /* Keep the original pagelist until we know all the allocations
- * have succeeded
- */
- temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
- sizeof(*dma->pagelist),
- GFP_KERNEL);
- if (!temp_pagelist) {
- kfree(entry->buflist);
- kfree(entry->seglist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- memcpy(temp_pagelist,
- dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
- DRM_DEBUG("pagelist: %d entries\n",
- dma->page_count + (count << page_order));
-
- entry->buf_size = size;
- entry->page_order = page_order;
- byte_count = 0;
- page_count = 0;
-
- while (entry->buf_count < count) {
- dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
- if (!dmah) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- entry->seg_count = count;
- drm_cleanup_buf_error(dev, entry);
- kfree(temp_pagelist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- dmah->size = total;
- dmah->vaddr = dma_alloc_coherent(dev->dev,
- dmah->size,
- &dmah->busaddr,
- GFP_KERNEL);
- if (!dmah->vaddr) {
- kfree(dmah);
-
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- entry->seg_count = count;
- drm_cleanup_buf_error(dev, entry);
- kfree(temp_pagelist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- entry->seglist[entry->seg_count++] = dmah;
- for (i = 0; i < (1 << page_order); i++) {
- DRM_DEBUG("page %d @ 0x%08lx\n",
- dma->page_count + page_count,
- (unsigned long)dmah->vaddr + PAGE_SIZE * i);
- temp_pagelist[dma->page_count + page_count++]
- = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
- }
- for (offset = 0;
- offset + size <= total && entry->buf_count < count;
- offset += alignment, ++entry->buf_count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
- buf->offset = (dma->byte_count + byte_count + offset);
- buf->address = (void *)(dmah->vaddr + offset);
- buf->bus_address = dmah->busaddr + offset;
- buf->next = NULL;
- buf->waiting = 0;
- buf->pending = 0;
- buf->file_priv = NULL;
-
- buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kzalloc(buf->dev_priv_size,
- GFP_KERNEL);
- if (!buf->dev_private) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- entry->seg_count = count;
- drm_cleanup_buf_error(dev, entry);
- kfree(temp_pagelist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- DRM_DEBUG("buffer %d @ %p\n",
- entry->buf_count, buf->address);
- }
- byte_count += PAGE_SIZE << page_order;
- }
-
- temp_buflist = krealloc(dma->buflist,
- (dma->buf_count + entry->buf_count) *
- sizeof(*dma->buflist), GFP_KERNEL);
- if (!temp_buflist) {
- /* Free the entry because it isn't valid */
- drm_cleanup_buf_error(dev, entry);
- kfree(temp_pagelist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- dma->buflist = temp_buflist;
-
- for (i = 0; i < entry->buf_count; i++) {
- dma->buflist[i + dma->buf_count] = &entry->buflist[i];
- }
-
- /* No allocations failed, so now we can replace the original pagelist
- * with the new one.
- */
- if (dma->page_count) {
- kfree(dma->pagelist);
- }
- dma->pagelist = temp_pagelist;
-
- dma->buf_count += entry->buf_count;
- dma->seg_count += entry->seg_count;
- dma->page_count += entry->seg_count << page_order;
- dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
-
- mutex_unlock(&dev->struct_mutex);
-
- request->count = entry->buf_count;
- request->size = size;
-
- if (request->flags & _DRM_PCI_BUFFER_RO)
- dma->flags = _DRM_DMA_USE_PCI_RO;
-
- atomic_dec(&dev->buf_alloc);
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_addbufs_pci);
-
-static int drm_legacy_addbufs_sg(struct drm_device *dev,
- struct drm_buf_desc *request)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_entry *entry;
- struct drm_buf *buf;
- unsigned long offset;
- unsigned long agp_offset;
- int count;
- int order;
- int size;
- int alignment;
- int page_order;
- int total;
- int byte_count;
- int i;
- struct drm_buf **temp_buflist;
-
- if (!drm_core_check_feature(dev, DRIVER_SG))
- return -EOPNOTSUPP;
-
- if (!dma)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- count = request->count;
- order = order_base_2(request->size);
- size = 1 << order;
-
- alignment = (request->flags & _DRM_PAGE_ALIGN)
- ? PAGE_ALIGN(size) : size;
- page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
- total = PAGE_SIZE << page_order;
-
- byte_count = 0;
- agp_offset = request->agp_start;
-
- DRM_DEBUG("count: %d\n", count);
- DRM_DEBUG("order: %d\n", order);
- DRM_DEBUG("size: %d\n", size);
- DRM_DEBUG("agp_offset: %lu\n", agp_offset);
- DRM_DEBUG("alignment: %d\n", alignment);
- DRM_DEBUG("page_order: %d\n", page_order);
- DRM_DEBUG("total: %d\n", total);
-
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
-
- spin_lock(&dev->buf_lock);
- if (dev->buf_use) {
- spin_unlock(&dev->buf_lock);
- return -EBUSY;
- }
- atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->buf_lock);
-
- mutex_lock(&dev->struct_mutex);
- entry = &dma->bufs[order];
- if (entry->buf_count) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM; /* May only be called once for each order */
- }
-
- if (count < 0 || count > 4096) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -EINVAL;
- }
-
- entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
- if (!entry->buflist) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- entry->buf_size = size;
- entry->page_order = page_order;
-
- offset = 0;
-
- while (entry->buf_count < count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
-
- buf->offset = (dma->byte_count + offset);
- buf->bus_address = agp_offset + offset;
- buf->address = (void *)(agp_offset + offset
- + (unsigned long)dev->sg->virtual);
- buf->next = NULL;
- buf->waiting = 0;
- buf->pending = 0;
- buf->file_priv = NULL;
-
- buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
- if (!buf->dev_private) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
-
- offset += alignment;
- entry->buf_count++;
- byte_count += PAGE_SIZE << page_order;
- }
-
- DRM_DEBUG("byte_count: %d\n", byte_count);
-
- temp_buflist = krealloc(dma->buflist,
- (dma->buf_count + entry->buf_count) *
- sizeof(*dma->buflist), GFP_KERNEL);
- if (!temp_buflist) {
- /* Free the entry because it isn't valid */
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- dma->buflist = temp_buflist;
-
- for (i = 0; i < entry->buf_count; i++) {
- dma->buflist[i + dma->buf_count] = &entry->buflist[i];
- }
-
- dma->buf_count += entry->buf_count;
- dma->seg_count += entry->seg_count;
- dma->page_count += byte_count >> PAGE_SHIFT;
- dma->byte_count += byte_count;
-
- DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
- DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
-
- mutex_unlock(&dev->struct_mutex);
-
- request->count = entry->buf_count;
- request->size = size;
-
- dma->flags = _DRM_DMA_USE_SG;
-
- atomic_dec(&dev->buf_alloc);
- return 0;
-}
-
-/*
- * Add buffers for DMA transfers (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a struct drm_buf_desc request.
- * \return zero on success or a negative number on failure.
- *
- * According to the memory type specified in drm_buf_desc::flags and the
- * build options, it dispatches the call either to addbufs_agp(),
- * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
- * PCI memory respectively.
- */
-int drm_legacy_addbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_buf_desc *request = data;
- int ret;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EOPNOTSUPP;
-
-#if IS_ENABLED(CONFIG_AGP)
- if (request->flags & _DRM_AGP_BUFFER)
- ret = drm_legacy_addbufs_agp(dev, request);
- else
-#endif
- if (request->flags & _DRM_SG_BUFFER)
- ret = drm_legacy_addbufs_sg(dev, request);
- else if (request->flags & _DRM_FB_BUFFER)
- ret = -EINVAL;
- else
- ret = drm_legacy_addbufs_pci(dev, request);
-
- return ret;
-}
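
An illustrative sketch, not part of this patch: the dispatch above was driven
entirely by drm_buf_desc::flags, so a legacy client selected the memory type
when issuing the (deprecated) ADD_BUFS ioctl. add_sg_bufs() below is a
hypothetical helper and fd is assumed to be an open DRM device file
descriptor; error handling is trimmed.

    #include <sys/ioctl.h>
    #include <drm/drm.h>

    static int add_sg_bufs(int fd, int count, int size)
    {
        struct drm_buf_desc desc = {
            .count = count,
            .size  = size,
            /* scatter-gather memory, page-aligned buffers */
            .flags = _DRM_SG_BUFFER | _DRM_PAGE_ALIGN,
        };

        /* on success the kernel writes the actual count/size back */
        return ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc);
    }
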
-
-/*
- * Get information about the buffer mappings.
- *
- * This was originally meant for debugging purposes, or for use by a
- * sophisticated client library to determine how best to use the available
- * buffers (e.g., large buffers can be used for image transfer).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_buf_info structure.
- * \return zero on success or a negative number on failure.
- *
- * Increments drm_device::buf_use while holding the drm_device::buf_lock
- * lock, preventing allocation of more buffers after this call. Information
- * about each requested buffer is then copied into user space.
- */
-int __drm_legacy_infobufs(struct drm_device *dev,
- void *data, int *p,
- int (*f)(void *, int, struct drm_buf_entry *))
-{
- struct drm_device_dma *dma = dev->dma;
- int i;
- int count;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EOPNOTSUPP;
-
- if (!dma)
- return -EINVAL;
-
- spin_lock(&dev->buf_lock);
- if (atomic_read(&dev->buf_alloc)) {
- spin_unlock(&dev->buf_lock);
- return -EBUSY;
- }
- ++dev->buf_use; /* Can't allocate more after this call */
- spin_unlock(&dev->buf_lock);
-
- for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
- if (dma->bufs[i].buf_count)
- ++count;
- }
-
- DRM_DEBUG("count = %d\n", count);
-
- if (*p >= count) {
- for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
- struct drm_buf_entry *from = &dma->bufs[i];
-
- if (from->buf_count) {
- if (f(data, count, from) < 0)
- return -EFAULT;
- DRM_DEBUG("%d %d %d %d %d\n",
- i,
- dma->bufs[i].buf_count,
- dma->bufs[i].buf_size,
- dma->bufs[i].low_mark,
- dma->bufs[i].high_mark);
- ++count;
- }
- }
- }
- *p = count;
-
- return 0;
-}
-
-static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
-{
- struct drm_buf_info *request = data;
- struct drm_buf_desc __user *to = &request->list[count];
- struct drm_buf_desc v = {.count = from->buf_count,
- .size = from->buf_size,
- .low_mark = from->low_mark,
- .high_mark = from->high_mark};
-
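- /* copy every field up to, but not including, ->flags */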
- if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)))
- return -EFAULT;
- return 0;
-}
-
-int drm_legacy_infobufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_buf_info *request = data;
-
- return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
-}
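
The *p >= count check above yields the usual two-call pattern on the client
side. A hedged sketch, assuming the deprecated INFO_BUFS uapi and an open
DRM fd (error handling trimmed):

    struct drm_buf_info info = { .count = 0, .list = NULL };

    /* first call only reports how many per-order entries exist */
    ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
    info.list = calloc(info.count, sizeof(*info.list));
    /* second call fills the caller-allocated list */
    ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
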
-
-/*
- * Specifies a low and high water mark for buffer allocation
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg a pointer to a drm_buf_desc structure.
- * \return zero on success or a negative number on failure.
- *
- * Verifies that the size order is bounded between the admissible orders and
- * updates the respective drm_device_dma::bufs entry low and high water mark.
- *
- * \note This ioctl is deprecated and mostly never used.
- */
-int drm_legacy_markbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_desc *request = data;
- int order;
- struct drm_buf_entry *entry;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EOPNOTSUPP;
-
- if (!dma)
- return -EINVAL;
-
- DRM_DEBUG("%d, %d, %d\n",
- request->size, request->low_mark, request->high_mark);
- order = order_base_2(request->size);
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
- entry = &dma->bufs[order];
-
- if (request->low_mark < 0 || request->low_mark > entry->buf_count)
- return -EINVAL;
- if (request->high_mark < 0 || request->high_mark > entry->buf_count)
- return -EINVAL;
-
- entry->low_mark = request->low_mark;
- entry->high_mark = request->high_mark;
-
- return 0;
-}
-
-/*
- * Unreserve the buffers in list, previously reserved using drmDMA.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_buf_free structure.
- * \return zero on success or a negative number on failure.
- *
- * Calls free_buffer() for each used buffer.
- * This function is primarily used for debugging.
- */
-int drm_legacy_freebufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_free *request = data;
- int i;
- int idx;
- struct drm_buf *buf;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EOPNOTSUPP;
-
- if (!dma)
- return -EINVAL;
-
- DRM_DEBUG("%d\n", request->count);
- for (i = 0; i < request->count; i++) {
- if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
- return -EFAULT;
- if (idx < 0 || idx >= dma->buf_count) {
- DRM_ERROR("Index %d (of %d max)\n",
- idx, dma->buf_count - 1);
- return -EINVAL;
- }
- idx = array_index_nospec(idx, dma->buf_count);
- buf = dma->buflist[idx];
- if (buf->file_priv != file_priv) {
- DRM_ERROR("Process %d freeing buffer not owned\n",
- task_pid_nr(current));
- return -EINVAL;
- }
- drm_legacy_free_buffer(dev, buf);
- }
-
- return 0;
-}
-
-/*
- * Maps all of the DMA buffers into client-virtual space (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_buf_map structure.
- * \return zero on success or a negative number on failure.
- *
- * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
- * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
- * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
- * drm_mmap_dma().
- */
-int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
- void __user **v,
- int (*f)(void *, int, unsigned long,
- struct drm_buf *),
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int retcode = 0;
- unsigned long virtual;
- int i;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EOPNOTSUPP;
-
- if (!dma)
- return -EINVAL;
-
- spin_lock(&dev->buf_lock);
- if (atomic_read(&dev->buf_alloc)) {
- spin_unlock(&dev->buf_lock);
- return -EBUSY;
- }
- dev->buf_use++; /* Can't allocate more after this call */
- spin_unlock(&dev->buf_lock);
-
- if (*p >= dma->buf_count) {
- if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
- || (drm_core_check_feature(dev, DRIVER_SG)
- && (dma->flags & _DRM_DMA_USE_SG))) {
- struct drm_local_map *map = dev->agp_buffer_map;
- unsigned long token = dev->agp_buffer_token;
-
- if (!map) {
- retcode = -EINVAL;
- goto done;
- }
- virtual = vm_mmap(file_priv->filp, 0, map->size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- token);
- } else {
- virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
- PROT_READ | PROT_WRITE,
- MAP_SHARED, 0);
- }
- if (virtual > -1024UL) {
- /* Real error: the top 1024 values of the address space encode errnos */
- retcode = (signed long)virtual;
- goto done;
- }
- *v = (void __user *)virtual;
-
- for (i = 0; i < dma->buf_count; i++) {
- if (f(data, i, virtual, dma->buflist[i]) < 0) {
- retcode = -EFAULT;
- goto done;
- }
- }
- }
- done:
- *p = dma->buf_count;
- DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode);
-
- return retcode;
-}
-
-static int map_one_buf(void *data, int idx, unsigned long virtual,
- struct drm_buf *buf)
-{
- struct drm_buf_map *request = data;
- unsigned long address = virtual + buf->offset; /* *** */
-
- if (copy_to_user(&request->list[idx].idx, &buf->idx,
- sizeof(request->list[0].idx)))
- return -EFAULT;
- if (copy_to_user(&request->list[idx].total, &buf->total,
- sizeof(request->list[0].total)))
- return -EFAULT;
- if (clear_user(&request->list[idx].used, sizeof(int)))
- return -EFAULT;
- if (copy_to_user(&request->list[idx].address, &address,
- sizeof(address)))
- return -EFAULT;
- return 0;
-}
-
-int drm_legacy_mapbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_buf_map *request = data;
-
- return __drm_legacy_mapbufs(dev, data, &request->count,
- &request->virtual, map_one_buf,
- file_priv);
-}
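
On the client side, the single vm_mmap() above maps the whole buffer region
at once; each returned drm_buf_pub::address then points inside that mapping.
A hedged sketch, assuming the deprecated MAP_BUFS uapi (buf_count, pub_list,
src and len are hypothetical caller-owned variables):

    struct drm_buf_map map = {
        .count = buf_count,
        .list  = pub_list,    /* caller-allocated array of struct drm_buf_pub */
    };

    if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &map) == 0)
        /* map.virtual is the base of the region */
        memcpy(pub_list[0].address, src, len);
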
-
-int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (dev->driver->dma_ioctl)
- return dev->driver->dma_ioctl(dev, data, file_priv);
- else
- return -EINVAL;
-}
-
-struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
-{
- struct drm_map_list *entry;
-
- list_for_each_entry(entry, &dev->maplist, head) {
- if (entry->map && entry->map->type == _DRM_SHM &&
- (entry->map->flags & _DRM_CONTAINS_LOCK)) {
- return entry->map;
- }
- }
- return NULL;
-}
-EXPORT_SYMBOL(drm_legacy_getsarea);
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
deleted file mode 100644
index a0fc779e5e1e..000000000000
--- a/drivers/gpu/drm/drm_context.c
+++ /dev/null
@@ -1,513 +0,0 @@
-/*
- * Legacy: Generic DRM Contexts
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Author: Rickard E. (Rik) Faith <[email protected]>
- * Author: Gareth Hughes <[email protected]>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_print.h>
-
-#include "drm_legacy.h"
-
-struct drm_ctx_list {
- struct list_head head;
- drm_context_t handle;
- struct drm_file *tag;
-};
-
-/******************************************************************/
-/** \name Context bitmap support */
-/*@{*/
-
-/*
- * Free a handle from the context bitmap.
- *
- * \param dev DRM device.
- * \param ctx_handle context handle.
- *
- * Removes the entry specified by \p ctx_handle from drm_device::ctx_idr,
- * while holding the drm_device::struct_mutex lock.
- */
-void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
-{
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- mutex_lock(&dev->struct_mutex);
- idr_remove(&dev->ctx_idr, ctx_handle);
- mutex_unlock(&dev->struct_mutex);
-}
-
-/*
- * Context bitmap allocation.
- *
- * \param dev DRM device.
- * \return (non-negative) context handle on success or a negative number on failure.
- *
- * Allocate a new id from drm_device::ctx_idr while holding the
- * drm_device::struct_mutex lock.
- */
-static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
-{
- int ret;
-
- mutex_lock(&dev->struct_mutex);
- ret = idr_alloc(&dev->ctx_idr, NULL, DRM_RESERVED_CONTEXTS, 0,
- GFP_KERNEL);
- mutex_unlock(&dev->struct_mutex);
- return ret;
-}
-
-/*
- * Context bitmap initialization.
- *
- * \param dev DRM device.
- *
- * Initialise the drm_device::ctx_idr.
- */
-void drm_legacy_ctxbitmap_init(struct drm_device * dev)
-{
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- idr_init(&dev->ctx_idr);
-}
-
-/*
- * Context bitmap cleanup.
- *
- * \param dev DRM device.
- *
- * Destroy drm_device::ctx_idr, freeing all remaining entries, while
- * holding the drm_device::struct_mutex lock.
- */
-void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
-{
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- mutex_lock(&dev->struct_mutex);
- idr_destroy(&dev->ctx_idr);
- mutex_unlock(&dev->struct_mutex);
-}
-
-/**
- * drm_legacy_ctxbitmap_flush() - Flush all contexts owned by a file
- * @dev: DRM device to operate on
- * @file: Open file to flush contexts for
- *
- * This iterates over all contexts on @dev and drops them if they're owned by
- * @file. Note that after this call returns, new contexts might be added if
- * the file is still alive.
- */
-void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
-{
- struct drm_ctx_list *pos, *tmp;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- mutex_lock(&dev->ctxlist_mutex);
-
- list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
- if (pos->tag == file &&
- pos->handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_dtor)
- dev->driver->context_dtor(dev, pos->handle);
-
- drm_legacy_ctxbitmap_free(dev, pos->handle);
- list_del(&pos->head);
- kfree(pos);
- }
- }
-
- mutex_unlock(&dev->ctxlist_mutex);
-}
-
-/*@}*/
-
-/******************************************************************/
-/** \name Per Context SAREA Support */
-/*@{*/
-
-/*
- * Get per-context SAREA.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx_priv_map structure.
- * \return zero on success or a negative number on failure.
- *
- * Looks up the map in drm_device::ctx_idr for the given context id and
- * returns its user-space token as the handle.
- */
-int drm_legacy_getsareactx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx_priv_map *request = data;
- struct drm_local_map *map;
- struct drm_map_list *_entry;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- mutex_lock(&dev->struct_mutex);
-
- map = idr_find(&dev->ctx_idr, request->ctx_id);
- if (!map) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- request->handle = NULL;
- list_for_each_entry(_entry, &dev->maplist, head) {
- if (_entry->map == map) {
- request->handle =
- (void *)(unsigned long)_entry->user_token;
- break;
- }
- }
-
- mutex_unlock(&dev->struct_mutex);
-
- if (request->handle == NULL)
- return -EINVAL;
-
- return 0;
-}
-
-/*
- * Set per-context SAREA.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx_priv_map structure.
- * \return zero on success or a negative number on failure.
- *
- * Searches for the mapping specified in \p arg and updates the entry in
- * drm_device::ctx_idr with it.
- */
-int drm_legacy_setsareactx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx_priv_map *request = data;
- struct drm_local_map *map = NULL;
- struct drm_map_list *r_list = NULL;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry(r_list, &dev->maplist, head) {
- if (r_list->map
- && r_list->user_token == (unsigned long) request->handle)
- goto found;
- }
- bad:
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
-
- found:
- map = r_list->map;
- if (!map)
- goto bad;
-
- if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
- goto bad;
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/*@}*/
-
-/******************************************************************/
-/** \name The actual DRM context handling routines */
-/*@{*/
-
-/*
- * Switch context.
- *
- * \param dev DRM device.
- * \param old old context handle.
- * \param new new context handle.
- * \return zero on success or a negative number on failure.
- *
- * Attempt to set drm_device::context_flag.
- */
-static int drm_context_switch(struct drm_device * dev, int old, int new)
-{
- if (test_and_set_bit(0, &dev->context_flag)) {
- DRM_ERROR("Reentering -- FIXME\n");
- return -EBUSY;
- }
-
- DRM_DEBUG("Context switch from %d to %d\n", old, new);
-
- if (new == dev->last_context) {
- clear_bit(0, &dev->context_flag);
- return 0;
- }
-
- return 0;
-}
-
-/*
- * Complete context switch.
- *
- * \param dev DRM device.
- * \param new new context handle.
- * \return zero on success or a negative number on failure.
- *
- * Updates drm_device::last_context and drm_device::last_switch. Verifies the
- * hardware lock is held, clears the drm_device::context_flag and wakes up
- * drm_device::context_wait.
- */
-static int drm_context_switch_complete(struct drm_device *dev,
- struct drm_file *file_priv, int new)
-{
- dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
-
- if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
- DRM_ERROR("Lock isn't held after context switch\n");
- }
-
- /*
-  * If a context switch is ever initiated when the kernel holds the
-  * lock, release that lock here.
-  */
- clear_bit(0, &dev->context_flag);
-
- return 0;
-}
-
-/*
- * Reserve contexts.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx_res structure.
- * \return zero on success or a negative number on failure.
- */
-int drm_legacy_resctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx_res *res = data;
- struct drm_ctx ctx;
- int i;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (res->count >= DRM_RESERVED_CONTEXTS) {
- memset(&ctx, 0, sizeof(ctx));
- for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
- ctx.handle = i;
- if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
- return -EFAULT;
- }
- }
- res->count = DRM_RESERVED_CONTEXTS;
-
- return 0;
-}
-
-/*
- * Add context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * Get a new handle for the context and copy to userspace.
- */
-int drm_legacy_addctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx_list *ctx_entry;
- struct drm_ctx *ctx = data;
- int tmp_handle;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- tmp_handle = drm_legacy_ctxbitmap_next(dev);
- if (tmp_handle == DRM_KERNEL_CONTEXT) {
- /* Skip kernel's context and get a new one. */
- tmp_handle = drm_legacy_ctxbitmap_next(dev);
- }
- DRM_DEBUG("%d\n", tmp_handle);
- if (tmp_handle < 0) {
- DRM_DEBUG("Not enough free contexts.\n");
- /* Should this return -EBUSY instead? */
- return tmp_handle;
- }
-
- ctx->handle = tmp_handle;
-
- ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
- if (!ctx_entry) {
- DRM_DEBUG("out of memory\n");
- return -ENOMEM;
- }
-
- INIT_LIST_HEAD(&ctx_entry->head);
- ctx_entry->handle = ctx->handle;
- ctx_entry->tag = file_priv;
-
- mutex_lock(&dev->ctxlist_mutex);
- list_add(&ctx_entry->head, &dev->ctxlist);
- mutex_unlock(&dev->ctxlist_mutex);
-
- return 0;
-}
-
-/*
- * Get context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- */
-int drm_legacy_getctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx *ctx = data;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- /* This is 0, because we don't handle any context flags */
- ctx->flags = 0;
-
- return 0;
-}
-
-/*
- * Switch context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * Calls context_switch().
- */
-int drm_legacy_switchctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx *ctx = data;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- DRM_DEBUG("%d\n", ctx->handle);
- return drm_context_switch(dev, dev->last_context, ctx->handle);
-}
-
-/*
- * New context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * Calls context_switch_complete().
- */
-int drm_legacy_newctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx *ctx = data;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- DRM_DEBUG("%d\n", ctx->handle);
- drm_context_switch_complete(dev, file_priv, ctx->handle);
-
- return 0;
-}
-
-/*
- * Remove context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * If not the special kernel context, calls ctxbitmap_free() to free the specified context.
- */
-int drm_legacy_rmctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx *ctx = data;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- DRM_DEBUG("%d\n", ctx->handle);
- if (ctx->handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_dtor)
- dev->driver->context_dtor(dev, ctx->handle);
- drm_legacy_ctxbitmap_free(dev, ctx->handle);
- }
-
- mutex_lock(&dev->ctxlist_mutex);
- if (!list_empty(&dev->ctxlist)) {
- struct drm_ctx_list *pos, *n;
-
- list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
- if (pos->handle == ctx->handle) {
- list_del(&pos->head);
- kfree(pos);
- }
- }
- }
- mutex_unlock(&dev->ctxlist_mutex);
-
- return 0;
-}
-
-/*@}*/
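
Taken together, the removed ioctls formed a small lifecycle. A hedged
userspace sketch against the deprecated uapi, with fd assumed to be an open
DRM device fd (error handling omitted):

    struct drm_ctx ctx = { 0 };

    ioctl(fd, DRM_IOCTL_ADD_CTX, &ctx);     /* allocate a handle */
    ioctl(fd, DRM_IOCTL_SWITCH_CTX, &ctx);  /* request the switch */
    ioctl(fd, DRM_IOCTL_NEW_CTX, &ctx);     /* complete the switch */
    ioctl(fd, DRM_IOCTL_RM_CTX, &ctx);      /* drop the handle */
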
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index a209659a996c..2dafc39a27cb 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -439,11 +439,8 @@ EXPORT_SYMBOL(drm_crtc_helper_set_mode);
* @state: atomic state object
*
* Provides a default CRTC-state check handler for CRTCs that only have
- * one primary plane attached to it.
- *
- * This is often the case for the CRTC of simple framebuffers. See also
- * drm_plane_helper_atomic_check() for the respective plane-state check
- * helper function.
+ * one primary plane attached to it. This is often the case for the CRTC
+ * of simple framebuffers.
*
* RETURNS:
* Zero on success, or an errno code otherwise.
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 6b646e0783be..a514d5207e41 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -253,7 +253,7 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
struct drm_file *file_priv,
struct drm_mode_object *obj,
struct drm_property *prop,
- uint64_t prop_value);
+ u64 prop_value, bool async_flip);
int drm_atomic_get_property(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val);
diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
index d8b2955e88fd..afb02aae707b 100644
--- a/drivers/gpu/drm/drm_damage_helper.c
+++ b/drivers/gpu/drm/drm_damage_helper.c
@@ -241,7 +241,8 @@ drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
iter->plane_src.x2 = (src.x2 >> 16) + !!(src.x2 & 0xFFFF);
iter->plane_src.y2 = (src.y2 >> 16) + !!(src.y2 & 0xFFFF);
- if (!iter->clips || !drm_rect_equals(&state->src, &old_state->src)) {
+ if (!iter->clips || state->ignore_damage_clips ||
+ !drm_rect_equals(&state->src, &old_state->src)) {
iter->clips = NULL;
iter->num_clips = 0;
iter->full_update = true;
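
The new ignore_damage_clips test forces the iterator into its full-update
path. For context, a hedged sketch of how a plane's atomic_update typically
consumes the iterator (my_hw_flush() is a hypothetical driver callback):

    struct drm_atomic_helper_damage_iter iter;
    struct drm_rect clip;

    drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
    drm_atomic_for_each_plane_damage(&iter, &clip)
        my_hw_flush(&clip);    /* flush only the damaged rectangle */
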
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index f291fb4b359f..02e7481758c0 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -314,10 +314,8 @@ void drm_debugfs_dev_register(struct drm_device *dev)
drm_framebuffer_debugfs_init(dev);
drm_client_debugfs_init(dev);
}
- if (drm_drv_uses_atomic_modeset(dev)) {
+ if (drm_drv_uses_atomic_modeset(dev))
drm_atomic_debugfs_init(dev);
- drm_bridge_debugfs_init(dev);
- }
}
int drm_debugfs_register(struct drm_minor *minor, int minor_id,
@@ -589,4 +587,65 @@ void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
crtc->debugfs_entry = NULL;
}
+static int bridges_show(struct seq_file *m, void *data)
+{
+ struct drm_encoder *encoder = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_bridge *bridge;
+ unsigned int idx = 0;
+
+ drm_for_each_bridge_in_chain(encoder, bridge) {
+ drm_printf(&p, "bridge[%d]: %ps\n", idx++, bridge->funcs);
+ drm_printf(&p, "\ttype: [%d] %s\n",
+ bridge->type,
+ drm_get_connector_type_name(bridge->type));
+#ifdef CONFIG_OF
+ if (bridge->of_node)
+ drm_printf(&p, "\tOF: %pOFfc\n", bridge->of_node);
+#endif
+ drm_printf(&p, "\tops: [0x%x]", bridge->ops);
+ if (bridge->ops & DRM_BRIDGE_OP_DETECT)
+ drm_puts(&p, " detect");
+ if (bridge->ops & DRM_BRIDGE_OP_EDID)
+ drm_puts(&p, " edid");
+ if (bridge->ops & DRM_BRIDGE_OP_HPD)
+ drm_puts(&p, " hpd");
+ if (bridge->ops & DRM_BRIDGE_OP_MODES)
+ drm_puts(&p, " modes");
+ drm_puts(&p, "\n");
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(bridges);
+
+void drm_debugfs_encoder_add(struct drm_encoder *encoder)
+{
+ struct drm_minor *minor = encoder->dev->primary;
+ struct dentry *root;
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "encoder-%d", encoder->index);
+ if (!name)
+ return;
+
+ root = debugfs_create_dir(name, minor->debugfs_root);
+ kfree(name);
+
+ encoder->debugfs_entry = root;
+
+ /* bridges list */
+ debugfs_create_file("bridges", 0444, root, encoder,
+ &bridges_fops);
+
+ if (encoder->funcs->debugfs_init)
+ encoder->funcs->debugfs_init(encoder, root);
+}
+
+void drm_debugfs_encoder_remove(struct drm_encoder *encoder)
+{
+ debugfs_remove_recursive(encoder->debugfs_entry);
+ encoder->debugfs_entry = NULL;
+}
+
#endif /* CONFIG_DEBUG_FS */
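
The encoder->funcs->debugfs_init hook called above lets a driver add its own
per-encoder files next to the generic "bridges" one. A hedged sketch, with
all my_* names hypothetical:

    static int my_state_show(struct seq_file *m, void *data)
    {
        struct drm_encoder *encoder = m->private;

        seq_printf(m, "encoder-%d state dump\n", encoder->index);
        return 0;
    }
    DEFINE_SHOW_ATTRIBUTE(my_state);

    static void my_encoder_debugfs_init(struct drm_encoder *encoder,
                                        struct dentry *root)
    {
        debugfs_create_file("state", 0444, root, encoder,
                            &my_state_fops);
    }
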
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
deleted file mode 100644
index eb6b741a6f99..000000000000
--- a/drivers/gpu/drm/drm_dma.c
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * \file drm_dma.c
- * DMA IOCTL and function support
- *
- * \author Rickard E. (Rik) Faith <[email protected]>
- * \author Gareth Hughes <[email protected]>
- */
-
-/*
- * Created: Fri Mar 19 14:30:16 1999 by [email protected]
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/export.h>
-#include <linux/pci.h>
-
-#include <drm/drm_drv.h>
-#include <drm/drm_print.h>
-
-#include "drm_legacy.h"
-
-/**
- * drm_legacy_dma_setup() - Initialize the DMA data.
- *
- * @dev: DRM device.
- * Return: zero on success or a negative value on failure.
- *
- * Allocate and initialize a drm_device_dma structure.
- */
-int drm_legacy_dma_setup(struct drm_device *dev)
-{
- int i;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
- !drm_core_check_feature(dev, DRIVER_LEGACY))
- return 0;
-
- dev->buf_use = 0;
- atomic_set(&dev->buf_alloc, 0);
-
- dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
- if (!dev->dma)
- return -ENOMEM;
-
- for (i = 0; i <= DRM_MAX_ORDER; i++)
- memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
-
- return 0;
-}
-
-/**
- * drm_legacy_dma_takedown() - Cleanup the DMA resources.
- *
- * @dev: DRM device.
- *
- * Free all pages associated with DMA buffers, the buffers and pages lists, and
- * finally the drm_device::dma structure itself.
- */
-void drm_legacy_dma_takedown(struct drm_device *dev)
-{
- struct drm_device_dma *dma = dev->dma;
- drm_dma_handle_t *dmah;
- int i, j;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
- !drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- if (!dma)
- return;
-
- /* Clear dma buffers */
- for (i = 0; i <= DRM_MAX_ORDER; i++) {
- if (dma->bufs[i].seg_count) {
- DRM_DEBUG("order %d: buf_count = %d,"
- " seg_count = %d\n",
- i,
- dma->bufs[i].buf_count,
- dma->bufs[i].seg_count);
- for (j = 0; j < dma->bufs[i].seg_count; j++) {
- if (dma->bufs[i].seglist[j]) {
- dmah = dma->bufs[i].seglist[j];
- dma_free_coherent(dev->dev,
- dmah->size,
- dmah->vaddr,
- dmah->busaddr);
- kfree(dmah);
- }
- }
- kfree(dma->bufs[i].seglist);
- }
- if (dma->bufs[i].buf_count) {
- for (j = 0; j < dma->bufs[i].buf_count; j++) {
- kfree(dma->bufs[i].buflist[j].dev_private);
- }
- kfree(dma->bufs[i].buflist);
- }
- }
-
- kfree(dma->buflist);
- kfree(dma->pagelist);
- kfree(dev->dma);
- dev->dma = NULL;
-}
-
-/**
- * drm_legacy_free_buffer() - Free a buffer.
- *
- * @dev: DRM device.
- * @buf: buffer to free.
- *
- * Resets the fields of \p buf.
- */
-void drm_legacy_free_buffer(struct drm_device *dev, struct drm_buf * buf)
-{
- if (!buf)
- return;
-
- buf->waiting = 0;
- buf->pending = 0;
- buf->file_priv = NULL;
- buf->used = 0;
-}
-
-/**
- * drm_legacy_reclaim_buffers() - Reclaim the buffers.
- *
- * @dev: DRM device.
- * @file_priv: DRM file private.
- *
- * Frees each buffer associated with \p file_priv not already on the hardware.
- */
-void drm_legacy_reclaim_buffers(struct drm_device *dev,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int i;
-
- if (!dma)
- return;
- for (i = 0; i < dma->buf_count; i++) {
- if (dma->buflist[i]->file_priv == file_priv) {
- switch (dma->buflist[i]->list) {
- case DRM_LIST_NONE:
- drm_legacy_free_buffer(dev, dma->buflist[i]);
- break;
- case DRM_LIST_WAIT:
- dma->buflist[i]->list = DRM_LIST_RECLAIM;
- break;
- default:
- /* Buffer already on hardware. */
- break;
- }
- }
- }
-}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 535f16e7882e..243cacb3575c 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -48,7 +48,6 @@
#include "drm_crtc_internal.h"
#include "drm_internal.h"
-#include "drm_legacy.h"
MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
@@ -585,8 +584,6 @@ static void drm_fs_inode_free(struct inode *inode)
static void drm_dev_init_release(struct drm_device *dev, void *res)
{
- drm_legacy_ctxbitmap_cleanup(dev);
- drm_legacy_remove_map_hash(dev);
drm_fs_inode_free(dev->anon_inode);
put_device(dev->dev);
@@ -597,7 +594,6 @@ static void drm_dev_init_release(struct drm_device *dev, void *res)
mutex_destroy(&dev->clientlist_mutex);
mutex_destroy(&dev->filelist_mutex);
mutex_destroy(&dev->struct_mutex);
- drm_legacy_destroy_members(dev);
}
static int drm_dev_init(struct drm_device *dev,
@@ -632,7 +628,6 @@ static int drm_dev_init(struct drm_device *dev,
return -EINVAL;
}
- drm_legacy_init_members(dev);
INIT_LIST_HEAD(&dev->filelist);
INIT_LIST_HEAD(&dev->filelist_internal);
INIT_LIST_HEAD(&dev->clientlist);
@@ -673,12 +668,6 @@ static int drm_dev_init(struct drm_device *dev,
goto err;
}
- ret = drm_legacy_create_map_hash(dev);
- if (ret)
- goto err;
-
- drm_legacy_ctxbitmap_init(dev);
-
if (drm_core_check_feature(dev, DRIVER_GEM)) {
ret = drm_gem_init(dev);
if (ret) {
@@ -949,8 +938,11 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
goto err_minors;
}
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_modeset_register_all(dev);
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_modeset_register_all(dev);
+ if (ret)
+ goto err_unload;
+ }
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
driver->name, driver->major, driver->minor,
@@ -960,6 +952,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
goto out_unlock;
+err_unload:
+ if (dev->driver->unload)
+ dev->driver->unload(dev);
err_minors:
remove_compat_control_link(dev);
drm_minor_unregister(dev, DRM_MINOR_ACCEL);
@@ -990,9 +985,6 @@ EXPORT_SYMBOL(drm_dev_register);
*/
void drm_dev_unregister(struct drm_device *dev)
{
- if (drm_core_check_feature(dev, DRIVER_LEGACY))
- drm_lastclose(dev);
-
dev->registered = false;
drm_client_dev_unregister(dev);
@@ -1003,9 +995,6 @@ void drm_dev_unregister(struct drm_device *dev)
if (dev->driver->unload)
dev->driver->unload(dev);
- drm_legacy_pci_agp_destroy(dev);
- drm_legacy_rmmaps(dev);
-
remove_compat_control_link(dev);
drm_minor_unregister(dev, DRM_MINOR_ACCEL);
drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index 1143bc7f3252..8f2bc6a28482 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -30,6 +30,7 @@
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
+#include "drm_internal.h"
/**
* DOC: overview
@@ -74,6 +75,8 @@ int drm_encoder_register_all(struct drm_device *dev)
int ret = 0;
drm_for_each_encoder(encoder, dev) {
+ drm_debugfs_encoder_add(encoder);
+
if (encoder->funcs && encoder->funcs->late_register)
ret = encoder->funcs->late_register(encoder);
if (ret)
@@ -90,6 +93,7 @@ void drm_encoder_unregister_all(struct drm_device *dev)
drm_for_each_encoder(encoder, dev) {
if (encoder->funcs && encoder->funcs->early_unregister)
encoder->funcs->early_unregister(encoder);
+ drm_debugfs_encoder_remove(encoder);
}
}
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 5ddaffd32586..8c87287c3e16 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -47,7 +47,6 @@
#include "drm_crtc_internal.h"
#include "drm_internal.h"
-#include "drm_legacy.h"
/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);
@@ -55,14 +54,6 @@ DEFINE_MUTEX(drm_global_mutex);
bool drm_dev_needs_global_mutex(struct drm_device *dev)
{
/*
- * Legacy drivers rely on all kinds of BKL locking semantics, don't
- * bother. They also still need BKL locking for their ioctls, so better
- * safe than sorry.
- */
- if (drm_core_check_feature(dev, DRIVER_LEGACY))
- return true;
-
- /*
* The deprecated ->load callback must be called after the driver is
* already registered. This means such drivers rely on the BKL to make
* sure an open can't proceed until the driver is actually fully set up.
@@ -107,9 +98,7 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev)
* drm_send_event() as the main starting points.
*
* The memory mapping implementation will vary depending on how the driver
- * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap()
- * function, modern drivers should use one of the provided memory-manager
- * specific implementations. For GEM-based drivers this is drm_gem_mmap().
+ * manages memory. For GEM-based drivers this is drm_gem_mmap().
*
* No other file operations are supported by the DRM userspace API. Overall the
* following is an example &file_operations structure::
@@ -254,18 +243,6 @@ void drm_file_free(struct drm_file *file)
(long)old_encode_dev(file->minor->kdev->devt),
atomic_read(&dev->open_count));
-#ifdef CONFIG_DRM_LEGACY
- if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
- dev->driver->preclose)
- dev->driver->preclose(dev, file);
-#endif
-
- if (drm_core_check_feature(dev, DRIVER_LEGACY))
- drm_legacy_lock_release(dev, file->filp);
-
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- drm_legacy_reclaim_buffers(dev, file);
-
drm_events_release(file);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -279,8 +256,6 @@ void drm_file_free(struct drm_file *file)
if (drm_core_check_feature(dev, DRIVER_GEM))
drm_gem_release(dev, file);
- drm_legacy_ctxbitmap_flush(dev, file);
-
if (drm_is_primary_client(file))
drm_master_release(file);
@@ -367,29 +342,6 @@ int drm_open_helper(struct file *filp, struct drm_minor *minor)
list_add(&priv->lhead, &dev->filelist);
mutex_unlock(&dev->filelist_mutex);
-#ifdef CONFIG_DRM_LEGACY
-#ifdef __alpha__
- /*
- * Default the hose
- */
- if (!dev->hose) {
- struct pci_dev *pci_dev;
-
- pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
- if (pci_dev) {
- dev->hose = pci_dev->sysdata;
- pci_dev_put(pci_dev);
- }
- if (!dev->hose) {
- struct pci_bus *b = list_entry(pci_root_buses.next,
- struct pci_bus, node);
- if (b)
- dev->hose = b->sysdata;
- }
- }
-#endif
-#endif
-
return 0;
}
@@ -411,7 +363,6 @@ int drm_open(struct inode *inode, struct file *filp)
struct drm_device *dev;
struct drm_minor *minor;
int retcode;
- int need_setup = 0;
minor = drm_minor_acquire(iminor(inode));
if (IS_ERR(minor))
@@ -421,8 +372,7 @@ int drm_open(struct inode *inode, struct file *filp)
if (drm_dev_needs_global_mutex(dev))
mutex_lock(&drm_global_mutex);
- if (!atomic_fetch_inc(&dev->open_count))
- need_setup = 1;
+ atomic_fetch_inc(&dev->open_count);
/* share address_space across all char-devs of a single device */
filp->f_mapping = dev->anon_inode->i_mapping;
@@ -430,13 +380,6 @@ int drm_open(struct inode *inode, struct file *filp)
retcode = drm_open_helper(filp, minor);
if (retcode)
goto err_undo;
- if (need_setup) {
- retcode = drm_legacy_setup(dev);
- if (retcode) {
- drm_close_helper(filp);
- goto err_undo;
- }
- }
if (drm_dev_needs_global_mutex(dev))
mutex_unlock(&drm_global_mutex);
@@ -460,9 +403,6 @@ void drm_lastclose(struct drm_device * dev)
dev->driver->lastclose(dev);
drm_dbg_core(dev, "driver lastclose completed\n");
- if (drm_core_check_feature(dev, DRIVER_LEGACY))
- drm_legacy_dev_reinit(dev);
-
drm_client_dev_restore(dev);
}
@@ -958,7 +898,7 @@ void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file)
{
struct drm_gem_object *obj;
struct drm_memory_stats status = {};
- enum drm_gem_object_status supported_status;
+ enum drm_gem_object_status supported_status = 0;
int id;
spin_lock(&file->table_lock);
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 09e289fca5c3..3cc0ffc28e86 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -583,7 +583,7 @@ int drm_mode_getfb2_ioctl(struct drm_device *dev,
struct drm_mode_fb_cmd2 *r = data;
struct drm_framebuffer *fb;
unsigned int i;
- int ret;
+ int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index 54f5e8851de5..a68ee6cc8a10 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
* Copyright (c) 2022 Red Hat.
*
@@ -1086,6 +1086,37 @@ drm_gpuvm_put(struct drm_gpuvm *gpuvm)
EXPORT_SYMBOL_GPL(drm_gpuvm_put);
static int
+exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj,
+ unsigned int num_fences)
+{
+ return num_fences ? drm_exec_prepare_obj(exec, obj, num_fences) :
+ drm_exec_lock_obj(exec, obj);
+}
+
+/**
+ * drm_gpuvm_prepare_vm() - prepare the GPUVMs common dma-resv
+ * @gpuvm: the &drm_gpuvm
+ * @exec: the &drm_exec context
+ * @num_fences: the amount of &dma_fences to reserve
+ *
+ * Calls drm_exec_prepare_obj() for the GPUVMs dummy &drm_gem_object; if
+ * @num_fences is zero drm_exec_lock_obj() is called instead.
+ *
+ * Using this function directly, it is the driver's responsibility to call
+ * drm_exec_init() and drm_exec_fini() accordingly.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int
+drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ unsigned int num_fences)
+{
+ return exec_prepare_obj(exec, gpuvm->r_obj, num_fences);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_vm);
+
+static int
__drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
struct drm_exec *exec,
unsigned int num_fences)
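
Because the kernel-doc above leaves drm_exec_init()/drm_exec_fini() to the
caller, the intended pattern is the usual drm_exec retry loop. A hedged
sketch, assuming the three-argument form of drm_exec_init():

    struct drm_exec exec;
    int ret;

    drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
    drm_exec_until_all_locked(&exec) {
        ret = drm_gpuvm_prepare_vm(gpuvm, &exec, num_fences);
        drm_exec_retry_on_contention(&exec);
        if (ret)
            goto out;

        ret = drm_gpuvm_prepare_objects(gpuvm, &exec, num_fences);
        drm_exec_retry_on_contention(&exec);
        if (ret)
            goto out;
    }
    /* ... submit work, add the reserved fences ... */
out:
    drm_exec_fini(&exec);
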
@@ -1095,7 +1126,7 @@ __drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
int ret = 0;
for_each_vm_bo_in_list(gpuvm, extobj, &extobjs, vm_bo) {
- ret = drm_exec_prepare_obj(exec, vm_bo->obj, num_fences);
+ ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
if (ret)
break;
}
@@ -1116,7 +1147,7 @@ drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm,
drm_gpuvm_resv_assert_held(gpuvm);
list_for_each_entry(vm_bo, &gpuvm->extobj.list, list.entry.extobj) {
- ret = drm_exec_prepare_obj(exec, vm_bo->obj, num_fences);
+ ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
if (ret)
break;
@@ -1134,7 +1165,8 @@ drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm,
* @num_fences: the amount of &dma_fences to reserve
*
* Calls drm_exec_prepare_obj() for all &drm_gem_objects the given
- * &drm_gpuvm contains mappings of.
+ * &drm_gpuvm contains mappings of; if @num_fences is zero drm_exec_lock_obj()
+ * is called instead.
*
 * Using this function directly, it is the driver's responsibility to call
* drm_exec_init() and drm_exec_fini() accordingly.
@@ -1171,7 +1203,8 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_objects);
* @num_fences: the amount of &dma_fences to reserve
*
* Calls drm_exec_prepare_obj() for all &drm_gem_objects mapped between @addr
- * and @addr + @range.
+ * and @addr + @range; if @num_fences is zero drm_exec_lock_obj() is called
+ * instead.
*
* Returns: 0 on success, negative error code on failure.
*/
@@ -1186,7 +1219,7 @@ drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) {
struct drm_gem_object *obj = va->gem.obj;
- ret = drm_exec_prepare_obj(exec, obj, num_fences);
+ ret = exec_prepare_obj(exec, obj, num_fences);
if (ret)
return ret;
}
@@ -1502,14 +1535,18 @@ drm_gpuvm_bo_destroy(struct kref *kref)
* hold the dma-resv or driver specific GEM gpuva lock.
*
* This function may only be called from non-atomic context.
+ *
+ * Returns: true if vm_bo was destroyed, false otherwise.
*/
-void
+bool
drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo)
{
might_sleep();
if (vm_bo)
- kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy);
+ return !!kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy);
+
+ return false;
}
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put);
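
The new bool return lets callers key follow-up work on whether the vm_bo was
actually destroyed. A minimal hedged sketch (my_driver_gem_cleanup() is a
hypothetical driver helper):

    if (drm_gpuvm_bo_put(vm_bo))
        my_driver_gem_cleanup(obj);    /* last reference went away */
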
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
deleted file mode 100644
index 60afa1865559..000000000000
--- a/drivers/gpu/drm/drm_hashtab.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- **************************************************************************/
-/*
- * Simple open hash tab implementation.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include <linux/hash.h>
-#include <linux/mm.h>
-#include <linux/rculist.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include <drm/drm_print.h>
-
-#include "drm_legacy.h"
-
-int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
-{
- unsigned int size = 1 << order;
-
- ht->order = order;
- ht->table = NULL;
- if (size <= PAGE_SIZE / sizeof(*ht->table))
- ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
- else
- ht->table = vzalloc(array_size(size, sizeof(*ht->table)));
- if (!ht->table) {
- DRM_ERROR("Out of memory for hash table\n");
- return -ENOMEM;
- }
- return 0;
-}
-
-void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
-{
- struct drm_hash_item *entry;
- struct hlist_head *h_list;
- unsigned int hashed_key;
- int count = 0;
-
- hashed_key = hash_long(key, ht->order);
- DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
- h_list = &ht->table[hashed_key];
- hlist_for_each_entry(entry, h_list, head)
- DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
-}
-
-static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
- unsigned long key)
-{
- struct drm_hash_item *entry;
- struct hlist_head *h_list;
- unsigned int hashed_key;
-
- hashed_key = hash_long(key, ht->order);
- h_list = &ht->table[hashed_key];
- hlist_for_each_entry(entry, h_list, head) {
- if (entry->key == key)
- return &entry->head;
- if (entry->key > key)
- break;
- }
- return NULL;
-}
-
-static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
- unsigned long key)
-{
- struct drm_hash_item *entry;
- struct hlist_head *h_list;
- unsigned int hashed_key;
-
- hashed_key = hash_long(key, ht->order);
- h_list = &ht->table[hashed_key];
- hlist_for_each_entry_rcu(entry, h_list, head) {
- if (entry->key == key)
- return &entry->head;
- if (entry->key > key)
- break;
- }
- return NULL;
-}
-
-int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
-{
- struct drm_hash_item *entry;
- struct hlist_head *h_list;
- struct hlist_node *parent;
- unsigned int hashed_key;
- unsigned long key = item->key;
-
- hashed_key = hash_long(key, ht->order);
- h_list = &ht->table[hashed_key];
- parent = NULL;
- hlist_for_each_entry(entry, h_list, head) {
- if (entry->key == key)
- return -EINVAL;
- if (entry->key > key)
- break;
- parent = &entry->head;
- }
- if (parent) {
- hlist_add_behind_rcu(&item->head, parent);
- } else {
- hlist_add_head_rcu(&item->head, h_list);
- }
- return 0;
-}
-
-/*
- * Insert an item and assign it any "bits"-bit key that hasn't been
- * used before.
- */
-int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
- unsigned long seed, int bits, int shift,
- unsigned long add)
-{
- int ret;
- unsigned long mask = (1UL << bits) - 1;
- unsigned long first, unshifted_key;
-
- unshifted_key = hash_long(seed, bits);
- first = unshifted_key;
- do {
- item->key = (unshifted_key << shift) + add;
- ret = drm_ht_insert_item(ht, item);
- if (ret)
- unshifted_key = (unshifted_key + 1) & mask;
- } while(ret && (unshifted_key != first));
-
- if (ret) {
- DRM_ERROR("Available key bit space exhausted\n");
- return -EINVAL;
- }
- return 0;
-}
-
-int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
- struct drm_hash_item **item)
-{
- struct hlist_node *list;
-
- list = drm_ht_find_key_rcu(ht, key);
- if (!list)
- return -EINVAL;
-
- *item = hlist_entry(list, struct drm_hash_item, head);
- return 0;
-}
-
-int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
-{
- struct hlist_node *list;
-
- list = drm_ht_find_key(ht, key);
- if (list) {
- hlist_del_init_rcu(list);
- return 0;
- }
- return -EINVAL;
-}
-
-int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
-{
- hlist_del_init_rcu(&item->head);
- return 0;
-}
-
-void drm_ht_remove(struct drm_open_hash *ht)
-{
- if (ht->table) {
- kvfree(ht->table);
- ht->table = NULL;
- }
-}
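
With this removal, drm_hashtab has no users left in the tree. The equivalent pattern in modern kernel code is the generic <linux/hashtable.h> helpers; the sketch below is a hypothetical illustration of that replacement (the obj names are invented here), not part of this change:

    #include <linux/hashtable.h>

    /* 64 buckets (2^6), analogous to drm_ht_create(ht, 6). */
    static DEFINE_HASHTABLE(obj_hash, 6);

    struct obj {
            unsigned long key;
            struct hlist_node node;
    };

    /* Like drm_ht_insert_item(); note hash_add() does not reject duplicate keys. */
    static void obj_insert(struct obj *o)
    {
            hash_add(obj_hash, &o->node, o->key);
    }

    /* Like drm_ht_find_item(): walk only the bucket that can hold the key. */
    static struct obj *obj_lookup(unsigned long key)
    {
            struct obj *o;

            hash_for_each_possible(obj_hash, o, node, key)
                    if (o->key == key)
                            return o;
            return NULL;
    }

Removal is hash_del(&o->node), mirroring drm_ht_remove_item().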
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index b12c463bc460..8e4faf0a28e6 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -117,17 +117,10 @@ void drm_handle_vblank_works(struct drm_vblank_crtc *vblank);
/* IOCTLS */
int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
-int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
/* drm_irq.c */
/* IOCTLS */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int drm_legacy_irq_control(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-#endif
-
int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
@@ -194,6 +187,8 @@ void drm_debugfs_connector_remove(struct drm_connector *connector);
void drm_debugfs_crtc_add(struct drm_crtc *crtc);
void drm_debugfs_crtc_remove(struct drm_crtc *crtc);
void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc);
+void drm_debugfs_encoder_add(struct drm_encoder *encoder);
+void drm_debugfs_encoder_remove(struct drm_encoder *encoder);
#else
static inline void drm_debugfs_dev_fini(struct drm_device *dev)
{
@@ -231,6 +226,14 @@ static inline void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
{
}
+static inline void drm_debugfs_encoder_add(struct drm_encoder *encoder)
+{
+}
+
+static inline void drm_debugfs_encoder_remove(struct drm_encoder *encoder)
+{
+}
+
#endif
drm_ioctl_t drm_version;
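
The new encoder debugfs hooks follow the usual kernel stub convention: a real declaration when the feature is configured, and an empty static inline otherwise, so call sites never need #ifdef guards. A minimal, hypothetical illustration of the pattern:

    struct foo;

    #if defined(CONFIG_DEBUG_FS)
    void foo_debugfs_add(struct foo *f);
    #else
    static inline void foo_debugfs_add(struct foo *f)
    {
    }
    #endif

    /* The caller stays unconditional; the stub compiles away when debugfs is off. */
    static void foo_register(struct foo *f)
    {
            foo_debugfs_add(f);
    }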
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 025dc558c94e..129e2b91dbfe 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -31,12 +31,12 @@
#include <linux/ratelimit.h>
#include <linux/export.h>
+#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
-#include "drm_legacy.h"
#define DRM_IOCTL_VERSION32 DRM_IOWR(0x00, drm_version32_t)
#define DRM_IOCTL_GET_UNIQUE32 DRM_IOWR(0x01, drm_unique32_t)
@@ -163,92 +163,6 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd,
return -EINVAL;
}
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-typedef struct drm_map32 {
- u32 offset; /* Requested physical address (0 for SAREA) */
- u32 size; /* Requested physical size (bytes) */
- enum drm_map_type type; /* Type of memory to map */
- enum drm_map_flags flags; /* Flags */
- u32 handle; /* User-space: "Handle" to pass to mmap() */
- int mtrr; /* MTRR slot used */
-} drm_map32_t;
-
-static int compat_drm_getmap(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_map32_t __user *argp = (void __user *)arg;
- drm_map32_t m32;
- struct drm_map map;
- int err;
-
- if (copy_from_user(&m32, argp, sizeof(m32)))
- return -EFAULT;
-
- map.offset = m32.offset;
- err = drm_ioctl_kernel(file, drm_legacy_getmap_ioctl, &map, 0);
- if (err)
- return err;
-
- m32.offset = map.offset;
- m32.size = map.size;
- m32.type = map.type;
- m32.flags = map.flags;
- m32.handle = ptr_to_compat((void __user *)map.handle);
- m32.mtrr = map.mtrr;
- if (copy_to_user(argp, &m32, sizeof(m32)))
- return -EFAULT;
- return 0;
-
-}
-
-static int compat_drm_addmap(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_map32_t __user *argp = (void __user *)arg;
- drm_map32_t m32;
- struct drm_map map;
- int err;
-
- if (copy_from_user(&m32, argp, sizeof(m32)))
- return -EFAULT;
-
- map.offset = m32.offset;
- map.size = m32.size;
- map.type = m32.type;
- map.flags = m32.flags;
-
- err = drm_ioctl_kernel(file, drm_legacy_addmap_ioctl, &map,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
- if (err)
- return err;
-
- m32.offset = map.offset;
- m32.mtrr = map.mtrr;
- m32.handle = ptr_to_compat((void __user *)map.handle);
- if (map.handle != compat_ptr(m32.handle))
- pr_err_ratelimited("compat_drm_addmap truncated handle %p for type %d offset %x\n",
- map.handle, m32.type, m32.offset);
-
- if (copy_to_user(argp, &m32, sizeof(m32)))
- return -EFAULT;
-
- return 0;
-}
-
-static int compat_drm_rmmap(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_map32_t __user *argp = (void __user *)arg;
- struct drm_map map;
- u32 handle;
-
- if (get_user(handle, &argp->handle))
- return -EFAULT;
- map.handle = compat_ptr(handle);
- return drm_ioctl_kernel(file, drm_legacy_rmmap_ioctl, &map, DRM_AUTH);
-}
-#endif
-
typedef struct drm_client32 {
int idx; /* Which client desired? */
int auth; /* Is client authenticated? */
@@ -308,501 +222,6 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
return 0;
}
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-typedef struct drm_buf_desc32 {
- int count; /* Number of buffers of this size */
- int size; /* Size in bytes */
- int low_mark; /* Low water mark */
- int high_mark; /* High water mark */
- int flags;
- u32 agp_start; /* Start address in the AGP aperture */
-} drm_buf_desc32_t;
-
-static int compat_drm_addbufs(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_buf_desc32_t __user *argp = (void __user *)arg;
- drm_buf_desc32_t desc32;
- struct drm_buf_desc desc;
- int err;
-
- if (copy_from_user(&desc32, argp, sizeof(drm_buf_desc32_t)))
- return -EFAULT;
-
- desc = (struct drm_buf_desc){
- desc32.count, desc32.size, desc32.low_mark, desc32.high_mark,
- desc32.flags, desc32.agp_start
- };
-
- err = drm_ioctl_kernel(file, drm_legacy_addbufs, &desc,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
- if (err)
- return err;
-
- desc32 = (drm_buf_desc32_t){
- desc.count, desc.size, desc.low_mark, desc.high_mark,
- desc.flags, desc.agp_start
- };
- if (copy_to_user(argp, &desc32, sizeof(drm_buf_desc32_t)))
- return -EFAULT;
-
- return 0;
-}
-
-static int compat_drm_markbufs(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_buf_desc32_t b32;
- drm_buf_desc32_t __user *argp = (void __user *)arg;
- struct drm_buf_desc buf;
-
- if (copy_from_user(&b32, argp, sizeof(b32)))
- return -EFAULT;
-
- buf.size = b32.size;
- buf.low_mark = b32.low_mark;
- buf.high_mark = b32.high_mark;
-
- return drm_ioctl_kernel(file, drm_legacy_markbufs, &buf,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-
-typedef struct drm_buf_info32 {
- int count; /**< Entries in list */
- u32 list;
-} drm_buf_info32_t;
-
-static int copy_one_buf32(void *data, int count, struct drm_buf_entry *from)
-{
- drm_buf_info32_t *request = data;
- drm_buf_desc32_t __user *to = compat_ptr(request->list);
- drm_buf_desc32_t v = {.count = from->buf_count,
- .size = from->buf_size,
- .low_mark = from->low_mark,
- .high_mark = from->high_mark};
-
- if (copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags)))
- return -EFAULT;
- return 0;
-}
-
-static int drm_legacy_infobufs32(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_buf_info32_t *request = data;
-
- return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf32);
-}
-
-static int compat_drm_infobufs(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_buf_info32_t req32;
- drm_buf_info32_t __user *argp = (void __user *)arg;
- int err;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
-
- if (req32.count < 0)
- req32.count = 0;
-
- err = drm_ioctl_kernel(file, drm_legacy_infobufs32, &req32, DRM_AUTH);
- if (err)
- return err;
-
- if (put_user(req32.count, &argp->count))
- return -EFAULT;
-
- return 0;
-}
-
-typedef struct drm_buf_pub32 {
- int idx; /**< Index into the master buffer list */
- int total; /**< Buffer size */
- int used; /**< Amount of buffer in use (for DMA) */
- u32 address; /**< Address of buffer */
-} drm_buf_pub32_t;
-
-typedef struct drm_buf_map32 {
- int count; /**< Length of the buffer list */
- u32 virtual; /**< Mmap'd area in user-virtual */
- u32 list; /**< Buffer information */
-} drm_buf_map32_t;
-
-static int map_one_buf32(void *data, int idx, unsigned long virtual,
- struct drm_buf *buf)
-{
- drm_buf_map32_t *request = data;
- drm_buf_pub32_t __user *to = compat_ptr(request->list) + idx;
- drm_buf_pub32_t v;
-
- v.idx = buf->idx;
- v.total = buf->total;
- v.used = 0;
- v.address = virtual + buf->offset;
- if (copy_to_user(to, &v, sizeof(v)))
- return -EFAULT;
- return 0;
-}
-
-static int drm_legacy_mapbufs32(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_buf_map32_t *request = data;
- void __user *v;
- int err = __drm_legacy_mapbufs(dev, data, &request->count,
- &v, map_one_buf32,
- file_priv);
- request->virtual = ptr_to_compat(v);
- return err;
-}
-
-static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_buf_map32_t __user *argp = (void __user *)arg;
- drm_buf_map32_t req32;
- int err;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
- if (req32.count < 0)
- return -EINVAL;
-
- err = drm_ioctl_kernel(file, drm_legacy_mapbufs32, &req32, DRM_AUTH);
- if (err)
- return err;
-
- if (put_user(req32.count, &argp->count)
- || put_user(req32.virtual, &argp->virtual))
- return -EFAULT;
-
- return 0;
-}
-
-typedef struct drm_buf_free32 {
- int count;
- u32 list;
-} drm_buf_free32_t;
-
-static int compat_drm_freebufs(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_buf_free32_t req32;
- struct drm_buf_free request;
- drm_buf_free32_t __user *argp = (void __user *)arg;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
-
- request.count = req32.count;
- request.list = compat_ptr(req32.list);
- return drm_ioctl_kernel(file, drm_legacy_freebufs, &request, DRM_AUTH);
-}
-
-typedef struct drm_ctx_priv_map32 {
- unsigned int ctx_id; /**< Context requesting private mapping */
- u32 handle; /**< Handle of map */
-} drm_ctx_priv_map32_t;
-
-static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_ctx_priv_map32_t req32;
- struct drm_ctx_priv_map request;
- drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
-
- request.ctx_id = req32.ctx_id;
- request.handle = compat_ptr(req32.handle);
- return drm_ioctl_kernel(file, drm_legacy_setsareactx, &request,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-
-static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct drm_ctx_priv_map req;
- drm_ctx_priv_map32_t req32;
- drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
- int err;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
-
- req.ctx_id = req32.ctx_id;
- err = drm_ioctl_kernel(file, drm_legacy_getsareactx, &req, DRM_AUTH);
- if (err)
- return err;
-
- req32.handle = ptr_to_compat((void __user *)req.handle);
- if (copy_to_user(argp, &req32, sizeof(req32)))
- return -EFAULT;
-
- return 0;
-}
-
-typedef struct drm_ctx_res32 {
- int count;
- u32 contexts;
-} drm_ctx_res32_t;
-
-static int compat_drm_resctx(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_ctx_res32_t __user *argp = (void __user *)arg;
- drm_ctx_res32_t res32;
- struct drm_ctx_res res;
- int err;
-
- if (copy_from_user(&res32, argp, sizeof(res32)))
- return -EFAULT;
-
- res.count = res32.count;
- res.contexts = compat_ptr(res32.contexts);
- err = drm_ioctl_kernel(file, drm_legacy_resctx, &res, DRM_AUTH);
- if (err)
- return err;
-
- res32.count = res.count;
- if (copy_to_user(argp, &res32, sizeof(res32)))
- return -EFAULT;
-
- return 0;
-}
-
-typedef struct drm_dma32 {
- int context; /**< Context handle */
- int send_count; /**< Number of buffers to send */
- u32 send_indices; /**< List of handles to buffers */
- u32 send_sizes; /**< Lengths of data to send */
- enum drm_dma_flags flags; /**< Flags */
- int request_count; /**< Number of buffers requested */
- int request_size; /**< Desired size for buffers */
- u32 request_indices; /**< Buffer information */
- u32 request_sizes;
- int granted_count; /**< Number of buffers granted */
-} drm_dma32_t;
-
-static int compat_drm_dma(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_dma32_t d32;
- drm_dma32_t __user *argp = (void __user *)arg;
- struct drm_dma d;
- int err;
-
- if (copy_from_user(&d32, argp, sizeof(d32)))
- return -EFAULT;
-
- d.context = d32.context;
- d.send_count = d32.send_count;
- d.send_indices = compat_ptr(d32.send_indices);
- d.send_sizes = compat_ptr(d32.send_sizes);
- d.flags = d32.flags;
- d.request_count = d32.request_count;
- d.request_indices = compat_ptr(d32.request_indices);
- d.request_sizes = compat_ptr(d32.request_sizes);
- err = drm_ioctl_kernel(file, drm_legacy_dma_ioctl, &d, DRM_AUTH);
- if (err)
- return err;
-
- if (put_user(d.request_size, &argp->request_size)
- || put_user(d.granted_count, &argp->granted_count))
- return -EFAULT;
-
- return 0;
-}
-#endif
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-#if IS_ENABLED(CONFIG_AGP)
-typedef struct drm_agp_mode32 {
- u32 mode; /**< AGP mode */
-} drm_agp_mode32_t;
-
-static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_agp_mode32_t __user *argp = (void __user *)arg;
- struct drm_agp_mode mode;
-
- if (get_user(mode.mode, &argp->mode))
- return -EFAULT;
-
- return drm_ioctl_kernel(file, drm_legacy_agp_enable_ioctl, &mode,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-
-typedef struct drm_agp_info32 {
- int agp_version_major;
- int agp_version_minor;
- u32 mode;
- u32 aperture_base; /* physical address */
- u32 aperture_size; /* bytes */
- u32 memory_allowed; /* bytes */
- u32 memory_used;
-
- /* PCI information */
- unsigned short id_vendor;
- unsigned short id_device;
-} drm_agp_info32_t;
-
-static int compat_drm_agp_info(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_agp_info32_t __user *argp = (void __user *)arg;
- drm_agp_info32_t i32;
- struct drm_agp_info info;
- int err;
-
- err = drm_ioctl_kernel(file, drm_legacy_agp_info_ioctl, &info, DRM_AUTH);
- if (err)
- return err;
-
- i32.agp_version_major = info.agp_version_major;
- i32.agp_version_minor = info.agp_version_minor;
- i32.mode = info.mode;
- i32.aperture_base = info.aperture_base;
- i32.aperture_size = info.aperture_size;
- i32.memory_allowed = info.memory_allowed;
- i32.memory_used = info.memory_used;
- i32.id_vendor = info.id_vendor;
- i32.id_device = info.id_device;
- if (copy_to_user(argp, &i32, sizeof(i32)))
- return -EFAULT;
-
- return 0;
-}
-
-typedef struct drm_agp_buffer32 {
- u32 size; /**< In bytes -- will round to page boundary */
- u32 handle; /**< Used for binding / unbinding */
- u32 type; /**< Type of memory to allocate */
- u32 physical; /**< Physical used by i810 */
-} drm_agp_buffer32_t;
-
-static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_agp_buffer32_t __user *argp = (void __user *)arg;
- drm_agp_buffer32_t req32;
- struct drm_agp_buffer request;
- int err;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
-
- request.size = req32.size;
- request.type = req32.type;
- err = drm_ioctl_kernel(file, drm_legacy_agp_alloc_ioctl, &request,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
- if (err)
- return err;
-
- req32.handle = request.handle;
- req32.physical = request.physical;
- if (copy_to_user(argp, &req32, sizeof(req32))) {
- drm_ioctl_kernel(file, drm_legacy_agp_free_ioctl, &request,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int compat_drm_agp_free(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_agp_buffer32_t __user *argp = (void __user *)arg;
- struct drm_agp_buffer request;
-
- if (get_user(request.handle, &argp->handle))
- return -EFAULT;
-
- return drm_ioctl_kernel(file, drm_legacy_agp_free_ioctl, &request,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-
-typedef struct drm_agp_binding32 {
- u32 handle; /**< From drm_agp_buffer */
- u32 offset; /**< In bytes -- will round to page boundary */
-} drm_agp_binding32_t;
-
-static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_agp_binding32_t __user *argp = (void __user *)arg;
- drm_agp_binding32_t req32;
- struct drm_agp_binding request;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
-
- request.handle = req32.handle;
- request.offset = req32.offset;
- return drm_ioctl_kernel(file, drm_legacy_agp_bind_ioctl, &request,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-
-static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_agp_binding32_t __user *argp = (void __user *)arg;
- struct drm_agp_binding request;
-
- if (get_user(request.handle, &argp->handle))
- return -EFAULT;
-
- return drm_ioctl_kernel(file, drm_legacy_agp_unbind_ioctl, &request,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-#endif /* CONFIG_AGP */
-
-typedef struct drm_scatter_gather32 {
- u32 size; /**< In bytes -- will round to page boundary */
- u32 handle; /**< Used for mapping / unmapping */
-} drm_scatter_gather32_t;
-
-static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_scatter_gather32_t __user *argp = (void __user *)arg;
- struct drm_scatter_gather request;
- int err;
-
- if (get_user(request.size, &argp->size))
- return -EFAULT;
-
- err = drm_ioctl_kernel(file, drm_legacy_sg_alloc, &request,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
- if (err)
- return err;
-
- /* XXX not sure about the handle conversion here... */
- if (put_user(request.handle >> PAGE_SHIFT, &argp->handle))
- return -EFAULT;
-
- return 0;
-}
-
-static int compat_drm_sg_free(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_scatter_gather32_t __user *argp = (void __user *)arg;
- struct drm_scatter_gather request;
- unsigned long x;
-
- if (get_user(x, &argp->handle))
- return -EFAULT;
- request.handle = x << PAGE_SHIFT;
- return drm_ioctl_kernel(file, drm_legacy_sg_free, &request,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-#endif
#if defined(CONFIG_X86)
typedef struct drm_update_draw32 {
drm_drawable_t handle;
@@ -854,7 +273,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
req.request.type = req32.request.type;
req.request.sequence = req32.request.sequence;
req.request.signal = req32.request.signal;
- err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED);
+ err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, 0);
req32.reply.type = req.reply.type;
req32.reply.sequence = req.reply.sequence;
@@ -914,37 +333,9 @@ static struct {
#define DRM_IOCTL32_DEF(n, f) [DRM_IOCTL_NR(n##32)] = {.fn = f, .name = #n}
DRM_IOCTL32_DEF(DRM_IOCTL_VERSION, compat_drm_version),
DRM_IOCTL32_DEF(DRM_IOCTL_GET_UNIQUE, compat_drm_getunique),
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- DRM_IOCTL32_DEF(DRM_IOCTL_GET_MAP, compat_drm_getmap),
-#endif
DRM_IOCTL32_DEF(DRM_IOCTL_GET_CLIENT, compat_drm_getclient),
DRM_IOCTL32_DEF(DRM_IOCTL_GET_STATS, compat_drm_getstats),
DRM_IOCTL32_DEF(DRM_IOCTL_SET_UNIQUE, compat_drm_setunique),
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- DRM_IOCTL32_DEF(DRM_IOCTL_ADD_MAP, compat_drm_addmap),
- DRM_IOCTL32_DEF(DRM_IOCTL_ADD_BUFS, compat_drm_addbufs),
- DRM_IOCTL32_DEF(DRM_IOCTL_MARK_BUFS, compat_drm_markbufs),
- DRM_IOCTL32_DEF(DRM_IOCTL_INFO_BUFS, compat_drm_infobufs),
- DRM_IOCTL32_DEF(DRM_IOCTL_MAP_BUFS, compat_drm_mapbufs),
- DRM_IOCTL32_DEF(DRM_IOCTL_FREE_BUFS, compat_drm_freebufs),
- DRM_IOCTL32_DEF(DRM_IOCTL_RM_MAP, compat_drm_rmmap),
- DRM_IOCTL32_DEF(DRM_IOCTL_SET_SAREA_CTX, compat_drm_setsareactx),
- DRM_IOCTL32_DEF(DRM_IOCTL_GET_SAREA_CTX, compat_drm_getsareactx),
- DRM_IOCTL32_DEF(DRM_IOCTL_RES_CTX, compat_drm_resctx),
- DRM_IOCTL32_DEF(DRM_IOCTL_DMA, compat_drm_dma),
-#if IS_ENABLED(CONFIG_AGP)
- DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ENABLE, compat_drm_agp_enable),
- DRM_IOCTL32_DEF(DRM_IOCTL_AGP_INFO, compat_drm_agp_info),
- DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ALLOC, compat_drm_agp_alloc),
- DRM_IOCTL32_DEF(DRM_IOCTL_AGP_FREE, compat_drm_agp_free),
- DRM_IOCTL32_DEF(DRM_IOCTL_AGP_BIND, compat_drm_agp_bind),
- DRM_IOCTL32_DEF(DRM_IOCTL_AGP_UNBIND, compat_drm_agp_unbind),
-#endif
-#endif
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- DRM_IOCTL32_DEF(DRM_IOCTL_SG_ALLOC, compat_drm_sg_alloc),
- DRM_IOCTL32_DEF(DRM_IOCTL_SG_FREE, compat_drm_sg_free),
-#endif
#if defined(CONFIG_X86)
DRM_IOCTL32_DEF(DRM_IOCTL_UPDATE_DRAW, compat_drm_update_draw),
#endif
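
Every handler removed above followed the same compat recipe: copy in a packed 32-bit struct, widen user pointers with compat_ptr(), call the native ioctl through drm_ioctl_kernel(), then narrow results back with ptr_to_compat(). A condensed sketch of that shape, with hypothetical drm_foo names standing in for the deleted legacy ioctls:

    #include <linux/compat.h>
    #include <linux/uaccess.h>

    typedef struct foo32 {
            u32 size;
            u32 handle;     /* 32-bit user pointer */
    } foo32_t;

    struct drm_foo {        /* hypothetical native counterpart */
            unsigned long size;
            void __user *handle;
    };

    /* Hypothetical native handler, same shape as the removed drm_legacy_*_ioctl(). */
    int drm_foo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);

    static int compat_drm_foo(struct file *file, unsigned int cmd,
                              unsigned long arg)
    {
            foo32_t __user *argp = (void __user *)arg;
            foo32_t f32;
            struct drm_foo f;
            int err;

            if (copy_from_user(&f32, argp, sizeof(f32)))
                    return -EFAULT;

            f.size = f32.size;
            f.handle = compat_ptr(f32.handle);      /* widen to a native pointer */

            err = drm_ioctl_kernel(file, drm_foo_ioctl, &f, DRM_AUTH);
            if (err)
                    return err;

            f32.handle = ptr_to_compat(f.handle);   /* narrow for the 32-bit caller */
            return copy_to_user(argp, &f32, sizeof(f32)) ? -EFAULT : 0;
    }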
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 44fda68c28ae..e368fc084c77 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -42,7 +42,6 @@
#include "drm_crtc_internal.h"
#include "drm_internal.h"
-#include "drm_legacy.h"
/**
* DOC: getunique and setversion story
@@ -301,6 +300,10 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
case DRM_CAP_CRTC_IN_VBLANK_EVENT:
req->value = 1;
break;
+ case DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP:
+ req->value = drm_core_check_feature(dev, DRIVER_ATOMIC) &&
+ dev->mode_config.async_page_flip;
+ break;
default:
return -EINVAL;
}
@@ -361,6 +364,15 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
file_priv->writeback_connectors = req->value;
break;
+ case DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT:
+ if (!drm_core_check_feature(dev, DRIVER_CURSOR_HOTSPOT))
+ return -EOPNOTSUPP;
+ if (!file_priv->atomic)
+ return -EINVAL;
+ if (req->value > 1)
+ return -EINVAL;
+ file_priv->supports_virtualized_cursor_plane = req->value;
+ break;
default:
return -EINVAL;
}
@@ -559,21 +571,11 @@ static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
.name = #ioctl \
}
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags) DRM_IOCTL_DEF(ioctl, _func, _flags)
-#else
-#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags) DRM_IOCTL_DEF(ioctl, drm_invalid_op, _flags)
-#endif
-
/* Ioctl table */
static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_legacy_irq_by_busid,
- DRM_MASTER|DRM_ROOT_ONLY),
-
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
@@ -586,63 +588,15 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_MASTER),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
-
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
-
DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, 0),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH),
-
DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_LOCK, drm_legacy_lock, DRM_AUTH),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_legacy_unlock, DRM_AUTH),
-
DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_legacy_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_legacy_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_legacy_infobufs, DRM_AUTH),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_legacy_mapbufs, DRM_AUTH),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_legacy_freebufs, DRM_AUTH),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_DMA, drm_legacy_dma_ioctl, DRM_AUTH),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_legacy_irq_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
-#if IS_ENABLED(CONFIG_AGP)
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_legacy_agp_acquire_ioctl,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_legacy_agp_release_ioctl,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_legacy_agp_enable_ioctl,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_legacy_agp_info_ioctl, DRM_AUTH),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_legacy_agp_alloc_ioctl,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_legacy_agp_free_ioctl,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_legacy_agp_bind_ioctl,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_legacy_agp_unbind_ioctl,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-#endif
-
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_legacy_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_legacy_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank_ioctl, DRM_UNLOCKED),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_legacy_modeset_ctl_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -775,7 +729,7 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata,
{
struct drm_file *file_priv = file->private_data;
struct drm_device *dev = file_priv->minor->dev;
- int retcode;
+ int ret;
/* Update drm_file owner if fd was passed along. */
drm_file_update_pid(file_priv);
@@ -783,20 +737,11 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata,
if (drm_dev_is_unplugged(dev))
return -ENODEV;
- retcode = drm_ioctl_permit(flags, file_priv);
- if (unlikely(retcode))
- return retcode;
-
- /* Enforce sane locking for modern driver ioctls. */
- if (likely(!drm_core_check_feature(dev, DRIVER_LEGACY)) ||
- (flags & DRM_UNLOCKED))
- retcode = func(dev, kdata, file_priv);
- else {
- mutex_lock(&drm_global_mutex);
- retcode = func(dev, kdata, file_priv);
- mutex_unlock(&drm_global_mutex);
- }
- return retcode;
+ ret = drm_ioctl_permit(flags, file_priv);
+ if (unlikely(ret))
+ return ret;
+
+ return func(dev, kdata, file_priv);
}
EXPORT_SYMBOL(drm_ioctl_kernel);
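
With the legacy locking gone, drm_ioctl_kernel() reduces to a permission check plus a direct call. Separately, the DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP value added to drm_getcap() above is probed from userspace like any other capability; a minimal libdrm sketch (assuming /dev/dri/card0 exists on the test machine):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <xf86drm.h>

    int main(void)
    {
            uint64_t value = 0;
            int fd = open("/dev/dri/card0", O_RDWR);

            if (fd < 0)
                    return 1;
            /* drmGetCap() returns 0 on success; value is 1 if async atomic flips work. */
            if (drmGetCap(fd, DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP, &value) == 0)
                    printf("atomic async page flip: %s\n", value ? "yes" : "no");
            close(fd);
            return 0;
    }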
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
deleted file mode 100644
index d327638e15ee..000000000000
--- a/drivers/gpu/drm/drm_irq.c
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * drm_irq.c IRQ and vblank support
- *
- * \author Rickard E. (Rik) Faith <[email protected]>
- * \author Gareth Hughes <[email protected]>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * Created: Fri Mar 19 14:30:16 1999 by [email protected]
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-
-#include <linux/export.h>
-#include <linux/interrupt.h> /* For task queue support */
-#include <linux/pci.h>
-#include <linux/vgaarb.h>
-
-#include <drm/drm.h>
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_legacy.h>
-#include <drm/drm_print.h>
-#include <drm/drm_vblank.h>
-
-#include "drm_internal.h"
-
-static int drm_legacy_irq_install(struct drm_device *dev, int irq)
-{
- int ret;
- unsigned long sh_flags = 0;
-
- if (irq == 0)
- return -EINVAL;
-
- if (dev->irq_enabled)
- return -EBUSY;
- dev->irq_enabled = true;
-
- DRM_DEBUG("irq=%d\n", irq);
-
- /* Before installing handler */
- if (dev->driver->irq_preinstall)
- dev->driver->irq_preinstall(dev);
-
- /* PCI devices require shared interrupts. */
- if (dev_is_pci(dev->dev))
- sh_flags = IRQF_SHARED;
-
- ret = request_irq(irq, dev->driver->irq_handler,
- sh_flags, dev->driver->name, dev);
-
- if (ret < 0) {
- dev->irq_enabled = false;
- return ret;
- }
-
- /* After installing handler */
- if (dev->driver->irq_postinstall)
- ret = dev->driver->irq_postinstall(dev);
-
- if (ret < 0) {
- dev->irq_enabled = false;
- if (drm_core_check_feature(dev, DRIVER_LEGACY))
- vga_client_unregister(to_pci_dev(dev->dev));
- free_irq(irq, dev);
- } else {
- dev->irq = irq;
- }
-
- return ret;
-}
-
-int drm_legacy_irq_uninstall(struct drm_device *dev)
-{
- unsigned long irqflags;
- bool irq_enabled;
- int i;
-
- irq_enabled = dev->irq_enabled;
- dev->irq_enabled = false;
-
- /*
- * Wake up any waiters so they don't hang. This is just to paper over
- * issues for UMS drivers which aren't in full control of their
- * vblank/irq handling. KMS drivers must ensure that vblanks are all
- * disabled when uninstalling the irq handler.
- */
- if (drm_dev_has_vblank(dev)) {
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- for (i = 0; i < dev->num_crtcs; i++) {
- struct drm_vblank_crtc *vblank = &dev->vblank[i];
-
- if (!vblank->enabled)
- continue;
-
- WARN_ON(drm_core_check_feature(dev, DRIVER_MODESET));
-
- drm_vblank_disable_and_save(dev, i);
- wake_up(&vblank->queue);
- }
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
- }
-
- if (!irq_enabled)
- return -EINVAL;
-
- DRM_DEBUG("irq=%d\n", dev->irq);
-
- if (drm_core_check_feature(dev, DRIVER_LEGACY))
- vga_client_unregister(to_pci_dev(dev->dev));
-
- if (dev->driver->irq_uninstall)
- dev->driver->irq_uninstall(dev);
-
- free_irq(dev->irq, dev);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_irq_uninstall);
-
-int drm_legacy_irq_control(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_control *ctl = data;
- int ret = 0, irq;
- struct pci_dev *pdev;
-
- /* If we don't have an IRQ, fall back for compatibility reasons -
- * this used to be a separate function in drm_dma.h.
- */
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return 0;
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return 0;
- /* UMS was only ever supported on pci devices. */
- if (WARN_ON(!dev_is_pci(dev->dev)))
- return -EINVAL;
-
- switch (ctl->func) {
- case DRM_INST_HANDLER:
- pdev = to_pci_dev(dev->dev);
- irq = pdev->irq;
-
- if (dev->if_version < DRM_IF_VERSION(1, 2) &&
- ctl->irq != irq)
- return -EINVAL;
- mutex_lock(&dev->struct_mutex);
- ret = drm_legacy_irq_install(dev, irq);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
- case DRM_UNINST_HANDLER:
- mutex_lock(&dev->struct_mutex);
- ret = drm_legacy_irq_uninstall(dev);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
- default:
- return -EINVAL;
- }
-}
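
drm_legacy_irq_install() existed only to service the DRM_IOCTL_CONTROL uAPI for user-mode-setting drivers; KMS drivers request their interrupt directly during probe, so nothing needs to replace this file. A hedged sketch of the modern equivalent for a PCI device (the foo_* names are hypothetical):

    #include <linux/interrupt.h>
    #include <linux/pci.h>
    #include <drm/drm_device.h>
    #include <drm/drm_vblank.h>

    static irqreturn_t foo_irq_handler(int irq, void *arg)
    {
            struct drm_device *dev = arg;

            /* Ack the hardware here, then report the event. */
            drm_handle_vblank(dev, 0);      /* e.g. vblank on pipe 0 */
            return IRQ_HANDLED;
    }

    static int foo_request_irq(struct drm_device *dev, struct pci_dev *pdev)
    {
            /* PCI interrupt lines are shared, as the legacy path also assumed. */
            return devm_request_irq(&pdev->dev, pdev->irq, foo_irq_handler,
                                    IRQF_SHARED, "foo-drm", dev);
    }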
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
deleted file mode 100644
index 70c9dba114a6..000000000000
--- a/drivers/gpu/drm/drm_legacy.h
+++ /dev/null
@@ -1,290 +0,0 @@
-#ifndef __DRM_LEGACY_H__
-#define __DRM_LEGACY_H__
-
-/*
- * Copyright (c) 2014 David Herrmann <[email protected]>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * This file contains legacy interfaces that modern drm drivers
- * should no longer be using. They cannot be removed while legacy
- * drivers still use them, since removing them would break the API.
- */
-#include <linux/list.h>
-
-#include <drm/drm.h>
-#include <drm/drm_device.h>
-#include <drm/drm_legacy.h>
-
-struct agp_memory;
-struct drm_buf_desc;
-struct drm_device;
-struct drm_file;
-struct drm_hash_item;
-struct drm_open_hash;
-
-/*
- * Hash-table Support
- */
-
-#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
-
-/* drm_hashtab.c */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
-int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
-int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
- unsigned long seed, int bits, int shift,
- unsigned long add);
-int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
-
-void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
-int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
-int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
-void drm_ht_remove(struct drm_open_hash *ht);
-#endif
-
-/*
- * RCU-safe interface
- *
- * The user of this API needs to make sure that two or more instances of the
- * hash table manipulation functions are never run simultaneously.
- * The lookup function drm_ht_find_item_rcu may, however, run simultaneously
- * with any of the manipulation functions as long as it's called from within
- * an RCU read-locked section.
- */
-#define drm_ht_insert_item_rcu drm_ht_insert_item
-#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please
-#define drm_ht_remove_key_rcu drm_ht_remove_key
-#define drm_ht_remove_item_rcu drm_ht_remove_item
-#define drm_ht_find_item_rcu drm_ht_find_item
-
-/*
- * Generic DRM Contexts
- */
-
-#define DRM_KERNEL_CONTEXT 0
-#define DRM_RESERVED_CONTEXTS 1
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_ctxbitmap_init(struct drm_device *dev);
-void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
-void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file);
-#else
-static inline void drm_legacy_ctxbitmap_init(struct drm_device *dev) {}
-static inline void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev) {}
-static inline void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file) {}
-#endif
-
-void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int drm_legacy_resctx(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_addctx(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_getctx(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_switchctx(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_newctx(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_rmctx(struct drm_device *d, void *v, struct drm_file *f);
-
-int drm_legacy_setsareactx(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f);
-#endif
-
-/*
- * Generic Buffer Management
- */
-
-#define DRM_MAP_HASH_OFFSET 0x10000000
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-static inline int drm_legacy_create_map_hash(struct drm_device *dev)
-{
- return drm_ht_create(&dev->map_hash, 12);
-}
-
-static inline void drm_legacy_remove_map_hash(struct drm_device *dev)
-{
- drm_ht_remove(&dev->map_hash);
-}
-#else
-static inline int drm_legacy_create_map_hash(struct drm_device *dev)
-{
- return 0;
-}
-
-static inline void drm_legacy_remove_map_hash(struct drm_device *dev) {}
-#endif
-
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_addmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_rmmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
-
-int drm_legacy_addbufs(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_infobufs(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_markbufs(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_freebufs(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_mapbufs(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_dma_ioctl(struct drm_device *d, void *v, struct drm_file *f);
-#endif
-
-int __drm_legacy_infobufs(struct drm_device *, void *, int *,
- int (*)(void *, int, struct drm_buf_entry *));
-int __drm_legacy_mapbufs(struct drm_device *, void *, int *,
- void __user **,
- int (*)(void *, int, unsigned long, struct drm_buf *),
- struct drm_file *);
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_master_rmmaps(struct drm_device *dev,
- struct drm_master *master);
-void drm_legacy_rmmaps(struct drm_device *dev);
-#else
-static inline void drm_legacy_master_rmmaps(struct drm_device *dev,
- struct drm_master *master) {}
-static inline void drm_legacy_rmmaps(struct drm_device *dev) {}
-#endif
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_vma_flush(struct drm_device *d);
-#else
-static inline void drm_legacy_vma_flush(struct drm_device *d)
-{
- /* do nothing */
-}
-#endif
-
-/*
- * AGP Support
- */
-
-struct drm_agp_mem {
- unsigned long handle;
- struct agp_memory *memory;
- unsigned long bound;
- int pages;
- struct list_head head;
-};
-
-/* drm_agpsupport.c */
-#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP)
-void drm_legacy_agp_clear(struct drm_device *dev);
-
-int drm_legacy_agp_acquire_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_agp_release_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_agp_enable_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_agp_info_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_agp_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_agp_free_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_agp_unbind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_agp_bind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-#else
-static inline void drm_legacy_agp_clear(struct drm_device *dev) {}
-#endif
-
-/* drm_lock.c */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f);
-void drm_legacy_lock_release(struct drm_device *dev, struct file *filp);
-#else
-static inline void drm_legacy_lock_release(struct drm_device *dev, struct file *filp) {}
-#endif
-
-/* DMA support */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int drm_legacy_dma_setup(struct drm_device *dev);
-void drm_legacy_dma_takedown(struct drm_device *dev);
-#else
-static inline int drm_legacy_dma_setup(struct drm_device *dev)
-{
- return 0;
-}
-#endif
-
-void drm_legacy_free_buffer(struct drm_device *dev,
- struct drm_buf * buf);
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_reclaim_buffers(struct drm_device *dev,
- struct drm_file *filp);
-#else
-static inline void drm_legacy_reclaim_buffers(struct drm_device *dev,
- struct drm_file *filp) {}
-#endif
-
-/* Scatter Gather Support */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_sg_cleanup(struct drm_device *dev);
-int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_sg_free(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-#endif
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_init_members(struct drm_device *dev);
-void drm_legacy_destroy_members(struct drm_device *dev);
-void drm_legacy_dev_reinit(struct drm_device *dev);
-int drm_legacy_setup(struct drm_device * dev);
-#else
-static inline void drm_legacy_init_members(struct drm_device *dev) {}
-static inline void drm_legacy_destroy_members(struct drm_device *dev) {}
-static inline void drm_legacy_dev_reinit(struct drm_device *dev) {}
-static inline int drm_legacy_setup(struct drm_device * dev) { return 0; }
-#endif
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master);
-#else
-static inline void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master) {}
-#endif
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_master_legacy_init(struct drm_master *master);
-#else
-static inline void drm_master_legacy_init(struct drm_master *master) {}
-#endif
-
-/* drm_pci.c */
-#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_PCI)
-int drm_legacy_irq_by_busid(struct drm_device *dev, void *data, struct drm_file *file_priv);
-void drm_legacy_pci_agp_destroy(struct drm_device *dev);
-#else
-static inline int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return -EINVAL;
-}
-
-static inline void drm_legacy_pci_agp_destroy(struct drm_device *dev) {}
-#endif
-
-#endif /* __DRM_LEGACY_H__ */
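
The RCU note in the deleted header (writers serialized externally, lookups allowed to race with them only inside an RCU read-side section) is the standard RCU hash-lookup contract. A minimal sketch of a reader honoring it, using the now-removed API purely for illustration:

    #include <linux/printk.h>
    #include <linux/rcupdate.h>

    static void report(struct drm_open_hash *ht, unsigned long key)
    {
            struct drm_hash_item *item;

            rcu_read_lock();        /* may run concurrently with one writer */
            if (!drm_ht_find_item_rcu(ht, key, &item))
                    pr_debug("found key 0x%08lx\n", item->key);
            rcu_read_unlock();      /* item must not be dereferenced past this point */
    }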
diff --git a/drivers/gpu/drm/drm_legacy_misc.c b/drivers/gpu/drm/drm_legacy_misc.c
deleted file mode 100644
index d4c5434062d7..000000000000
--- a/drivers/gpu/drm/drm_legacy_misc.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * \file drm_legacy_misc.c
- * Misc legacy support functions.
- *
- * \author Rickard E. (Rik) Faith <[email protected]>
- * \author Gareth Hughes <[email protected]>
- */
-
-/*
- * Created: Tue Feb 2 08:37:54 1999 by [email protected]
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_print.h>
-
-#include "drm_internal.h"
-#include "drm_legacy.h"
-
-void drm_legacy_init_members(struct drm_device *dev)
-{
- INIT_LIST_HEAD(&dev->ctxlist);
- INIT_LIST_HEAD(&dev->vmalist);
- INIT_LIST_HEAD(&dev->maplist);
- spin_lock_init(&dev->buf_lock);
- mutex_init(&dev->ctxlist_mutex);
-}
-
-void drm_legacy_destroy_members(struct drm_device *dev)
-{
- mutex_destroy(&dev->ctxlist_mutex);
-}
-
-int drm_legacy_setup(struct drm_device * dev)
-{
- int ret;
-
- if (dev->driver->firstopen &&
- drm_core_check_feature(dev, DRIVER_LEGACY)) {
- ret = dev->driver->firstopen(dev);
- if (ret != 0)
- return ret;
- }
-
- ret = drm_legacy_dma_setup(dev);
- if (ret < 0)
- return ret;
-
-
- DRM_DEBUG("\n");
- return 0;
-}
-
-void drm_legacy_dev_reinit(struct drm_device *dev)
-{
- if (dev->irq_enabled)
- drm_legacy_irq_uninstall(dev);
-
- mutex_lock(&dev->struct_mutex);
-
- drm_legacy_agp_clear(dev);
-
- drm_legacy_sg_cleanup(dev);
- drm_legacy_vma_flush(dev);
- drm_legacy_dma_takedown(dev);
-
- mutex_unlock(&dev->struct_mutex);
-
- dev->sigdata.lock = NULL;
-
- dev->context_flag = 0;
- dev->last_context = 0;
- dev->if_version = 0;
-
- DRM_DEBUG("lastclose completed\n");
-}
-
-void drm_master_legacy_init(struct drm_master *master)
-{
- spin_lock_init(&master->lock.spinlock);
- init_waitqueue_head(&master->lock.lock_queue);
-}
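
drm_legacy_init_members() was the last per-device state owned by the legacy layer. Drivers that need similar per-device lists and locks today typically use the managed helpers so teardown is automatic; a hedged sketch with a hypothetical driver structure:

    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <drm/drm_device.h>
    #include <drm/drm_managed.h>

    struct foo_device {
            struct drm_device drm;
            struct mutex ctx_lock;          /* hypothetical driver state */
            struct list_head ctx_list;
    };

    static int foo_init_state(struct foo_device *foo)
    {
            INIT_LIST_HEAD(&foo->ctx_list);
            /* Managed: the mutex is destroyed when the drm_device is released. */
            return drmm_mutex_init(&foo->drm, &foo->ctx_lock);
    }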
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
deleted file mode 100644
index 1efbd5389d89..000000000000
--- a/drivers/gpu/drm/drm_lock.c
+++ /dev/null
@@ -1,373 +0,0 @@
-/*
- * \file drm_lock.c
- * IOCTLs for locking
- *
- * \author Rickard E. (Rik) Faith <[email protected]>
- * \author Gareth Hughes <[email protected]>
- */
-
-/*
- * Created: Tue Feb 2 08:37:54 1999 by [email protected]
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/export.h>
-#include <linux/sched/signal.h>
-
-#include <drm/drm.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_print.h>
-
-#include "drm_internal.h"
-#include "drm_legacy.h"
-
-static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
-
-/*
- * Take the heavyweight lock.
- *
- * \param lock lock pointer.
- * \param context locking context.
- * \return one if the lock is held, or zero otherwise.
- *
- * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
- */
-static
-int drm_lock_take(struct drm_lock_data *lock_data,
- unsigned int context)
-{
- unsigned int old, new, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- spin_lock_bh(&lock_data->spinlock);
- do {
- old = *lock;
- if (old & _DRM_LOCK_HELD)
- new = old | _DRM_LOCK_CONT;
- else {
- new = context | _DRM_LOCK_HELD |
- ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
- _DRM_LOCK_CONT : 0);
- }
- prev = cmpxchg(lock, old, new);
- } while (prev != old);
- spin_unlock_bh(&lock_data->spinlock);
-
- if (_DRM_LOCKING_CONTEXT(old) == context) {
- if (old & _DRM_LOCK_HELD) {
- if (context != DRM_KERNEL_CONTEXT) {
- DRM_ERROR("%d holds heavyweight lock\n",
- context);
- }
- return 0;
- }
- }
-
- if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
- /* Have lock */
- return 1;
- }
- return 0;
-}
-
-/*
- * This takes a lock forcibly and hands it to context. Should ONLY be used
- * inside *_unlock to give lock to kernel before calling *_dma_schedule.
- *
- * \param dev DRM device.
- * \param lock lock pointer.
- * \param context locking context.
- * \return always one.
- *
- * Resets the lock file pointer.
- * Marks the lock as held by the given context, via the \p cmpxchg instruction.
- */
-static int drm_lock_transfer(struct drm_lock_data *lock_data,
- unsigned int context)
-{
- unsigned int old, new, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- lock_data->file_priv = NULL;
- do {
- old = *lock;
- new = context | _DRM_LOCK_HELD;
- prev = cmpxchg(lock, old, new);
- } while (prev != old);
- return 1;
-}
-
-static int drm_legacy_lock_free(struct drm_lock_data *lock_data,
- unsigned int context)
-{
- unsigned int old, new, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- spin_lock_bh(&lock_data->spinlock);
- if (lock_data->kernel_waiters != 0) {
- drm_lock_transfer(lock_data, 0);
- lock_data->idle_has_lock = 1;
- spin_unlock_bh(&lock_data->spinlock);
- return 1;
- }
- spin_unlock_bh(&lock_data->spinlock);
-
- do {
- old = *lock;
- new = _DRM_LOCKING_CONTEXT(old);
- prev = cmpxchg(lock, old, new);
- } while (prev != old);
-
- if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
- DRM_ERROR("%d freed heavyweight lock held by %d\n",
- context, _DRM_LOCKING_CONTEXT(old));
- return 1;
- }
- wake_up_interruptible(&lock_data->lock_queue);
- return 0;
-}
-
-/*
- * Lock ioctl.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_lock structure.
- * \return zero on success or negative number on failure.
- *
- * Add the current task to the lock wait queue, and attempt to take the lock.
- */
-int drm_legacy_lock(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- DECLARE_WAITQUEUE(entry, current);
- struct drm_lock *lock = data;
- struct drm_master *master = file_priv->master;
- int ret = 0;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- ++file_priv->lock_count;
-
- if (lock->context == DRM_KERNEL_CONTEXT) {
- DRM_ERROR("Process %d using kernel context %d\n",
- task_pid_nr(current), lock->context);
- return -EINVAL;
- }
-
- DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
- lock->context, task_pid_nr(current),
- master->lock.hw_lock ? master->lock.hw_lock->lock : -1,
- lock->flags);
-
- add_wait_queue(&master->lock.lock_queue, &entry);
- spin_lock_bh(&master->lock.spinlock);
- master->lock.user_waiters++;
- spin_unlock_bh(&master->lock.spinlock);
-
- for (;;) {
- __set_current_state(TASK_INTERRUPTIBLE);
- if (!master->lock.hw_lock) {
- /* Device has been unregistered */
- send_sig(SIGTERM, current, 0);
- ret = -EINTR;
- break;
- }
- if (drm_lock_take(&master->lock, lock->context)) {
- master->lock.file_priv = file_priv;
- master->lock.lock_time = jiffies;
- break; /* Got lock */
- }
-
- /* Contention */
- mutex_unlock(&drm_global_mutex);
- schedule();
- mutex_lock(&drm_global_mutex);
- if (signal_pending(current)) {
- ret = -EINTR;
- break;
- }
- }
- spin_lock_bh(&master->lock.spinlock);
- master->lock.user_waiters--;
- spin_unlock_bh(&master->lock.spinlock);
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&master->lock.lock_queue, &entry);
-
- DRM_DEBUG("%d %s\n", lock->context,
- ret ? "interrupted" : "has lock");
- if (ret) return ret;
-
- /* Don't block all signals on the master process for now;
- * probably not the correct answer, but it lets us debug the
- * xkb X server for now. */
- if (!drm_is_current_master(file_priv)) {
- dev->sigdata.context = lock->context;
- dev->sigdata.lock = master->lock.hw_lock;
- }
-
- if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
- {
- if (dev->driver->dma_quiescent(dev)) {
- DRM_DEBUG("%d waiting for DMA quiescent\n",
- lock->context);
- return -EBUSY;
- }
- }
-
- return 0;
-}
-
-/*
- * Unlock ioctl.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_lock structure.
- * \return zero on success or negative number on failure.
- *
- * Transfer and free the lock.
- */
-int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_lock *lock = data;
- struct drm_master *master = file_priv->master;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (lock->context == DRM_KERNEL_CONTEXT) {
- DRM_ERROR("Process %d using kernel context %d\n",
- task_pid_nr(current), lock->context);
- return -EINVAL;
- }
-
- if (drm_legacy_lock_free(&master->lock, lock->context)) {
- /* FIXME: Should really bail out here. */
- }
-
- return 0;
-}
-
-/*
- * This function returns immediately and takes the hw lock
- * with the kernel context if it is free; otherwise it gets the highest priority when and if
- * it is eventually released.
- *
- * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
- * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
- * a deadlock, which is why the "idlelock" was invented).
- *
- * This should be sufficient to wait for GPU idle without
- * having to worry about starvation.
- */
-void drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
-{
- int ret;
-
- spin_lock_bh(&lock_data->spinlock);
- lock_data->kernel_waiters++;
- if (!lock_data->idle_has_lock) {
-
- spin_unlock_bh(&lock_data->spinlock);
- ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
- spin_lock_bh(&lock_data->spinlock);
-
- if (ret == 1)
- lock_data->idle_has_lock = 1;
- }
- spin_unlock_bh(&lock_data->spinlock);
-}
-EXPORT_SYMBOL(drm_legacy_idlelock_take);
-
-void drm_legacy_idlelock_release(struct drm_lock_data *lock_data)
-{
- unsigned int old, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- spin_lock_bh(&lock_data->spinlock);
- if (--lock_data->kernel_waiters == 0) {
- if (lock_data->idle_has_lock) {
- do {
- old = *lock;
- prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
- } while (prev != old);
- wake_up_interruptible(&lock_data->lock_queue);
- lock_data->idle_has_lock = 0;
- }
- }
- spin_unlock_bh(&lock_data->spinlock);
-}
-EXPORT_SYMBOL(drm_legacy_idlelock_release);
-
-static int drm_legacy_i_have_hw_lock(struct drm_device *dev,
- struct drm_file *file_priv)
-{
- struct drm_master *master = file_priv->master;
-
- return (file_priv->lock_count && master->lock.hw_lock &&
- _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
- master->lock.file_priv == file_priv);
-}
-
-void drm_legacy_lock_release(struct drm_device *dev, struct file *filp)
-{
- struct drm_file *file_priv = filp->private_data;
-
- /* if the master has gone away we can't do anything with the lock */
- if (!dev->master)
- return;
-
- if (drm_legacy_i_have_hw_lock(dev, file_priv)) {
- DRM_DEBUG("File %p released, freeing lock for context %d\n",
- filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
- drm_legacy_lock_free(&file_priv->master->lock,
- _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
- }
-}
-
-void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master)
-{
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- /*
- * Since the master is disappearing, so is the
- * possibility to lock.
- */
- mutex_lock(&dev->struct_mutex);
- if (master->lock.hw_lock) {
- if (dev->sigdata.lock == master->lock.hw_lock)
- dev->sigdata.lock = NULL;
- master->lock.hw_lock = NULL;
- master->lock.file_priv = NULL;
- wake_up_interruptible_all(&master->lock.lock_queue);
- }
- mutex_unlock(&dev->struct_mutex);
-}
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
deleted file mode 100644
index d2e1dccd8113..000000000000
--- a/drivers/gpu/drm/drm_memory.c
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * \file drm_memory.c
- * Memory management wrappers for DRM
- *
- * \author Rickard E. (Rik) Faith <[email protected]>
- * \author Gareth Hughes <[email protected]>
- */
-
-/*
- * Created: Thu Feb 4 14:00:34 1999 by [email protected]
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/export.h>
-#include <linux/highmem.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-
-#include <drm/drm_cache.h>
-#include <drm/drm_device.h>
-
-#include "drm_legacy.h"
-
-#if IS_ENABLED(CONFIG_AGP)
-
-#ifdef HAVE_PAGE_AGP
-# include <asm/agp.h>
-#else
-# ifdef __powerpc__
-# define PAGE_AGP pgprot_noncached_wc(PAGE_KERNEL)
-# else
-# define PAGE_AGP PAGE_KERNEL
-# endif
-#endif
-
-static void *agp_remap(unsigned long offset, unsigned long size,
- struct drm_device *dev)
-{
- unsigned long i, num_pages =
- PAGE_ALIGN(size) / PAGE_SIZE;
- struct drm_agp_mem *agpmem;
- struct page **page_map;
- struct page **phys_page_map;
- void *addr;
-
- size = PAGE_ALIGN(size);
-
-#ifdef __alpha__
- offset -= dev->hose->mem_space->start;
-#endif
-
- list_for_each_entry(agpmem, &dev->agp->memory, head)
- if (agpmem->bound <= offset
- && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
- (offset + size))
- break;
- if (&agpmem->head == &dev->agp->memory)
- return NULL;
-
- /*
- * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
- * the CPU do not get remapped by the GART. We fix this by using the kernel's
- * page-table instead (that's probably faster anyhow...).
- */
- /* note: use vmalloc() because num_pages could be large... */
- page_map = vmalloc(array_size(num_pages, sizeof(struct page *)));
- if (!page_map)
- return NULL;
-
- phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE);
- for (i = 0; i < num_pages; ++i)
- page_map[i] = phys_page_map[i];
- addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
- vfree(page_map);
-
- return addr;
-}
-
-#else /* CONFIG_AGP */
-static inline void *agp_remap(unsigned long offset, unsigned long size,
- struct drm_device *dev)
-{
- return NULL;
-}
-
-#endif /* CONFIG_AGP */
-
-void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev)
-{
- if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
- map->handle = agp_remap(map->offset, map->size, dev);
- else
- map->handle = ioremap(map->offset, map->size);
-}
-EXPORT_SYMBOL(drm_legacy_ioremap);
-
-void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
-{
- if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
- map->handle = agp_remap(map->offset, map->size, dev);
- else
- map->handle = ioremap_wc(map->offset, map->size);
-}
-EXPORT_SYMBOL(drm_legacy_ioremap_wc);
-
-void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
-{
- if (!map->handle || !map->size)
- return;
-
- if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
- vunmap(map->handle);
- else
- iounmap(map->handle);
-}
-EXPORT_SYMBOL(drm_legacy_ioremapfree);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 14201f73aab1..843a6dbda93a 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -347,7 +347,8 @@ static int mipi_dsi_remove_device_fn(struct device *dev, void *priv)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
- mipi_dsi_detach(dsi);
+ if (dsi->attached)
+ mipi_dsi_detach(dsi);
mipi_dsi_device_unregister(dsi);
return 0;
@@ -370,11 +371,18 @@ EXPORT_SYMBOL(mipi_dsi_host_unregister);
int mipi_dsi_attach(struct mipi_dsi_device *dsi)
{
const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+ int ret;
if (!ops || !ops->attach)
return -ENOSYS;
- return ops->attach(dsi->host, dsi);
+ ret = ops->attach(dsi->host, dsi);
+ if (ret)
+ return ret;
+
+ dsi->attached = true;
+
+ return 0;
}
EXPORT_SYMBOL(mipi_dsi_attach);
@@ -386,9 +394,14 @@ int mipi_dsi_detach(struct mipi_dsi_device *dsi)
{
const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+ if (WARN_ON(!dsi->attached))
+ return -EINVAL;
+
if (!ops || !ops->detach)
return -ENOSYS;
+ dsi->attached = false;
+
return ops->detach(dsi->host, dsi);
}
EXPORT_SYMBOL(mipi_dsi_detach);
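
A minimal sketch of the pattern this change protects, using a hypothetical DSI peripheral driver (the foo_* names are invented): mipi_dsi_attach() now records dsi->attached on success and mipi_dsi_detach() clears it, so a device that never attached, or that already detached itself in remove(), is skipped by mipi_dsi_remove_device_fn() above instead of being detached twice.

    #include <drm/drm_mipi_dsi.h>

    static int foo_panel_probe(struct mipi_dsi_device *dsi)
    {
            dsi->lanes = 4;
            dsi->format = MIPI_DSI_FMT_RGB888;

            /* Sets dsi->attached on success. */
            return mipi_dsi_attach(dsi);
    }

    static void foo_panel_remove(struct mipi_dsi_device *dsi)
    {
            /* Clears dsi->attached, so host teardown skips this device. */
            mipi_dsi_detach(dsi);
    }

    static struct mipi_dsi_driver foo_panel_driver = {
            .probe = foo_panel_probe,
            .remove = foo_panel_remove,
            .driver = { .name = "foo-panel" },
    };
    module_mipi_dsi_driver(foo_panel_driver);
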
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index ac0d2ce3f870..0e8355063eee 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -538,7 +538,7 @@ retry:
obj_to_connector(obj),
prop_value);
} else {
- ret = drm_atomic_set_property(state, file_priv, obj, prop, prop_value);
+ ret = drm_atomic_set_property(state, file_priv, obj, prop, prop_value, false);
if (ret)
goto out;
ret = drm_atomic_commit(state);
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 39d35fc3a43b..c585f1e8803e 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -29,18 +29,12 @@
#include <linux/pci.h>
#include <linux/slab.h>
+#include <drm/drm_auth.h>
#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include "drm_internal.h"
-#include "drm_legacy.h"
-
-#ifdef CONFIG_DRM_LEGACY
-/* List of devices hanging off drivers with stealth attach. */
-static LIST_HEAD(legacy_dev_list);
-static DEFINE_MUTEX(legacy_dev_list_lock);
-#endif
static int drm_get_pci_domain(struct drm_device *dev)
{
@@ -71,199 +65,3 @@ int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
master->unique_len = strlen(master->unique);
return 0;
}
-
-#ifdef CONFIG_DRM_LEGACY
-
-static int drm_legacy_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
-{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
-
- if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
- (p->busnum & 0xff) != pdev->bus->number ||
- p->devnum != PCI_SLOT(pdev->devfn) || p->funcnum != PCI_FUNC(pdev->devfn))
- return -EINVAL;
-
- p->irq = pdev->irq;
-
- DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
- p->irq);
- return 0;
-}
-
-/**
- * drm_legacy_irq_by_busid - Get interrupt from bus ID
- * @dev: DRM device
- * @data: IOCTL parameter pointing to a drm_irq_busid structure
- * @file_priv: DRM file private.
- *
- * Finds the PCI device with the specified bus id and gets its IRQ number.
- * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
- * to that of the device that this DRM instance attached to.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_irq_busid *p = data;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- /* UMS was only ever supported on PCI devices. */
- if (WARN_ON(!dev_is_pci(dev->dev)))
- return -EINVAL;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return -EOPNOTSUPP;
-
- return drm_legacy_pci_irq_by_busid(dev, p);
-}
-
-void drm_legacy_pci_agp_destroy(struct drm_device *dev)
-{
- if (dev->agp) {
- arch_phys_wc_del(dev->agp->agp_mtrr);
- drm_legacy_agp_clear(dev);
- kfree(dev->agp);
- dev->agp = NULL;
- }
-}
-
-static void drm_legacy_pci_agp_init(struct drm_device *dev)
-{
- if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
- if (pci_find_capability(to_pci_dev(dev->dev), PCI_CAP_ID_AGP))
- dev->agp = drm_legacy_agp_init(dev);
- if (dev->agp) {
- dev->agp->agp_mtrr = arch_phys_wc_add(
- dev->agp->agp_info.aper_base,
- dev->agp->agp_info.aper_size *
- 1024 * 1024);
- }
- }
-}
-
-static int drm_legacy_get_pci_dev(struct pci_dev *pdev,
- const struct pci_device_id *ent,
- const struct drm_driver *driver)
-{
- struct drm_device *dev;
- int ret;
-
- DRM_DEBUG("\n");
-
- dev = drm_dev_alloc(driver, &pdev->dev);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
-
- ret = pci_enable_device(pdev);
- if (ret)
- goto err_free;
-
-#ifdef __alpha__
- dev->hose = pdev->sysdata;
-#endif
-
- drm_legacy_pci_agp_init(dev);
-
- ret = drm_dev_register(dev, ent->driver_data);
- if (ret)
- goto err_agp;
-
- if (drm_core_check_feature(dev, DRIVER_LEGACY)) {
- mutex_lock(&legacy_dev_list_lock);
- list_add_tail(&dev->legacy_dev_list, &legacy_dev_list);
- mutex_unlock(&legacy_dev_list_lock);
- }
-
- return 0;
-
-err_agp:
- drm_legacy_pci_agp_destroy(dev);
- pci_disable_device(pdev);
-err_free:
- drm_dev_put(dev);
- return ret;
-}
-
-/**
- * drm_legacy_pci_init - shadow-attach a legacy DRM PCI driver
- * @driver: DRM device driver
- * @pdriver: PCI device driver
- *
- * This is only used by legacy dri1 drivers and deprecated.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int drm_legacy_pci_init(const struct drm_driver *driver,
- struct pci_driver *pdriver)
-{
- struct pci_dev *pdev = NULL;
- const struct pci_device_id *pid;
- int i;
-
- DRM_DEBUG("\n");
-
- if (WARN_ON(!(driver->driver_features & DRIVER_LEGACY)))
- return -EINVAL;
-
- /* If not using KMS, fall back to stealth mode manual scanning. */
- for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
- pid = &pdriver->id_table[i];
-
- /* Loop around setting up a DRM device for each PCI device
- * matching our ID and device class. If we had the internal
- * function that pci_get_subsys and pci_get_class used, we'd
- * be able to just pass pid in instead of doing a two-stage
- * thing.
- */
- pdev = NULL;
- while ((pdev =
- pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
- pid->subdevice, pdev)) != NULL) {
- if ((pdev->class & pid->class_mask) != pid->class)
- continue;
-
- /* stealth mode requires a manual probe */
- pci_dev_get(pdev);
- drm_legacy_get_pci_dev(pdev, pid, driver);
- }
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_pci_init);
-
-/**
- * drm_legacy_pci_exit - unregister shadow-attach legacy DRM driver
- * @driver: DRM device driver
- * @pdriver: PCI device driver
- *
- * Unregister a DRM driver shadow-attached through drm_legacy_pci_init(). This
- * is deprecated and only used by dri1 drivers.
- */
-void drm_legacy_pci_exit(const struct drm_driver *driver,
- struct pci_driver *pdriver)
-{
- struct drm_device *dev, *tmp;
-
- DRM_DEBUG("\n");
-
- if (!(driver->driver_features & DRIVER_LEGACY)) {
- WARN_ON(1);
- } else {
- mutex_lock(&legacy_dev_list_lock);
- list_for_each_entry_safe(dev, tmp, &legacy_dev_list,
- legacy_dev_list) {
- if (dev->driver == driver) {
- list_del(&dev->legacy_dev_list);
- drm_put_dev(dev);
- }
- }
- mutex_unlock(&legacy_dev_list_lock);
- }
- DRM_INFO("Module unloaded\n");
-}
-EXPORT_SYMBOL(drm_legacy_pci_exit);
-
-#endif
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 24e7998d1731..9e8e4c60983d 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -230,6 +230,103 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
return 0;
}
+/**
+ * DOC: hotspot properties
+ *
+ * HOTSPOT_X: property to set mouse hotspot x offset.
+ * HOTSPOT_Y: property to set mouse hotspot y offset.
+ *
+ * When the plane is being used as a cursor image to display a mouse pointer,
+ * the "hotspot" is the offset within the cursor image where mouse events
+ * are expected to go.
+ *
+ * Positive values move the hotspot from the top-left corner of the cursor
+ * plane towards the right and bottom.
+ *
+ * Most display drivers do not need this information because the
+ * hotspot is not actually connected to anything visible on screen.
+ * However, this is necessary for display drivers like the para-virtualized
+ * drivers (e.g. qxl, vbox, virtio, vmwgfx) that are attached to a user console
+ * with a mouse pointer. Since these consoles are often remoted over a
+ * network, they would otherwise have to wait to display the pointer movement to
+ * the user until a full network round-trip has occurred. New mouse events have
+ * to be sent from the user's console, over the network to the virtual input
+ * devices, forwarded to the desktop for processing, and then the cursor plane's
+ * position can be updated and sent back to the user's console over the network.
+ * Instead, with the hotspot information, the console can anticipate the new
+ * location, and draw the mouse cursor there before the confirmation comes in.
+ * To do that correctly, the user's console must be able to predict how the
+ * desktop will process mouse events, which normally requires the desktop's
+ * mouse topology information, ie where each CRTC sits in the mouse coordinate
+ * space. This is typically sent to the para-virtualized drivers using some
+ * driver-specific method, and the driver then forwards it to the console by
+ * way of the virtual display device or hypervisor.
+ *
+ * The assumption is generally made that there is only one cursor plane being
+ * used this way at a time, and that the desktop is feeding all mouse devices
+ * into the same global pointer. Para-virtualized drivers that require this
+ * should only be exposing a single cursor plane, or find some other way
+ * to coordinate with a userspace desktop that supports multiple pointers.
+ * If the hotspot properties are set, the cursor plane is therefore assumed to be
+ * used only for displaying a mouse cursor image, and the position of the combined
+ * cursor plane + offset can then be used for coordinating with input from a
+ * mouse device.
+ *
+ * The cursor will then be drawn either at the location of the plane in the CRTC
+ * console, or as a free-floating cursor plane on the user's console
+ * corresponding to their desktop mouse position.
+ *
+ * DRM clients which would like to work correctly on drivers which expose
+ * hotspot properties should advertise DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT.
+ * Setting this cap on drivers which do not special-case
+ * cursor planes will return EOPNOTSUPP, which userspace can use to
+ * gauge the requirements of the hardware/drivers it is running on. Advertising
+ * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT implies that the userspace client will be
+ * correctly setting the hotspot properties.
+ */
+
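
A hedged userspace sketch of the opt-in described above (not part of this patch; set_cursor_hotspot() and its parameters are invented, and the HOTSPOT_X/HOTSPOT_Y property IDs are assumed to have been looked up beforehand via drmModeObjectGetProperties()):

    #include <errno.h>
    #include <stdint.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    static int set_cursor_hotspot(int fd, uint32_t plane_id,
                                  uint32_t prop_x, uint32_t prop_y,
                                  int32_t hot_x, int32_t hot_y)
    {
            drmModeAtomicReq *req;
            int ret;

            /* Opt in; hotspot-requiring drivers hide their cursor planes
             * from atomic clients that skip this step. */
            ret = drmSetClientCap(fd, DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT, 1);
            if (ret)
                    return ret;

            req = drmModeAtomicAlloc();
            if (!req)
                    return -ENOMEM;

            drmModeAtomicAddProperty(req, plane_id, prop_x, (uint64_t)(int64_t)hot_x);
            drmModeAtomicAddProperty(req, plane_id, prop_y, (uint64_t)(int64_t)hot_y);
            ret = drmModeAtomicCommit(fd, req, 0, NULL);
            drmModeAtomicFree(req);
            return ret;
    }
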
+/**
+ * drm_plane_create_hotspot_properties - creates the mouse hotspot
+ * properties and attaches them to the given cursor plane
+ *
+ * @plane: drm cursor plane
+ *
+ * This function enables the mouse hotspot properties on a given
+ * cursor plane. See the documentation for hotspot properties
+ * for a better understanding of what they're used for.
+ *
+ * RETURNS:
+ * Zero for success or -errno
+ */
+static int drm_plane_create_hotspot_properties(struct drm_plane *plane)
+{
+ struct drm_property *prop_x;
+ struct drm_property *prop_y;
+
+ drm_WARN_ON(plane->dev,
+ !drm_core_check_feature(plane->dev,
+ DRIVER_CURSOR_HOTSPOT));
+
+ prop_x = drm_property_create_signed_range(plane->dev, 0, "HOTSPOT_X",
+ INT_MIN, INT_MAX);
+ if (IS_ERR(prop_x))
+ return PTR_ERR(prop_x);
+
+ prop_y = drm_property_create_signed_range(plane->dev, 0, "HOTSPOT_Y",
+ INT_MIN, INT_MAX);
+ if (IS_ERR(prop_y)) {
+ drm_property_destroy(plane->dev, prop_x);
+ return PTR_ERR(prop_y);
+ }
+
+ drm_object_attach_property(&plane->base, prop_x, 0);
+ drm_object_attach_property(&plane->base, prop_y, 0);
+ plane->hotspot_x_property = prop_x;
+ plane->hotspot_y_property = prop_y;
+
+ return 0;
+}
+
__printf(9, 0)
static int __drm_universal_plane_init(struct drm_device *dev,
struct drm_plane *plane,
@@ -348,6 +445,10 @@ static int __drm_universal_plane_init(struct drm_device *dev,
drm_object_attach_property(&plane->base, config->prop_src_w, 0);
drm_object_attach_property(&plane->base, config->prop_src_h, 0);
}
+ if (drm_core_check_feature(dev, DRIVER_CURSOR_HOTSPOT) &&
+ type == DRM_PLANE_TYPE_CURSOR) {
+ drm_plane_create_hotspot_properties(plane);
+ }
if (format_modifier_count)
create_in_format_blob(dev, plane);
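
For reference, a driver opts in to the gate above with a single feature flag; a sketch with a hypothetical driver name (the hotspot properties are then created automatically for every cursor plane):

    static const struct drm_driver foo_driver = {
            .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC |
                               DRIVER_CURSOR_HOTSPOT,
            /* ... remaining ops elided ... */
    };
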
@@ -678,6 +779,19 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
!file_priv->universal_planes)
continue;
+ /*
+ * If we're running on a virtualized driver,
+ * then unless userspace advertises support for the
+ * virtualized cursor plane, disable cursor planes
+ * because they'll be broken due to missing cursor
+ * hotspot info.
+ */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+ drm_core_check_feature(dev, DRIVER_CURSOR_HOTSPOT) &&
+ file_priv->atomic &&
+ !file_priv->supports_virtualized_cursor_plane)
+ continue;
+
if (drm_lease_held(file_priv, plane->base.id)) {
if (count < plane_resp->count_planes &&
put_user(plane->base.id, plane_ptr + count))
@@ -1052,8 +1166,10 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
return PTR_ERR(fb);
}
- fb->hot_x = req->hot_x;
- fb->hot_y = req->hot_y;
+ if (plane->hotspot_x_property && plane->state)
+ plane->state->hotspot_x = req->hot_x;
+ if (plane->hotspot_y_property && plane->state)
+ plane->state->hotspot_y = req->hot_y;
} else {
fb = NULL;
}
@@ -1442,6 +1558,36 @@ out:
* Drivers implementing damage can use drm_atomic_helper_damage_iter_init() and
* drm_atomic_helper_damage_iter_next() helper iterator function to get damage
* rectangles clipped to &drm_plane_state.src.
+ *
+ * Note that there are two types of damage handling: frame damage and buffer
+ * damage; the type of damage handling implemented depends on a driver's upload
+ * target. Drivers implementing a per-plane or per-CRTC upload target need to
+ * handle frame damage, while drivers implementing a per-buffer upload target
+ * need to handle buffer damage.
+ *
+ * The existing damage helpers only support the frame damage type; there is no
+ * buffer age support or similar damage accumulation algorithm implemented yet.
+ *
+ * Only drivers handling frame damage can use the mentioned damage helpers to
+ * iterate over the damaged regions. Drivers that handle buffer damage must set
+ * &drm_plane_state.ignore_damage_clips for drm_atomic_helper_damage_iter_init()
+ * to know that damage clips should be ignored and return &drm_plane_state.src
+ * as the damage rectangle, to force a full plane update.
+ *
+ * Drivers with a per-buffer upload target could compare the &drm_plane_state.fb
+ * of the old and new plane states to determine if the framebuffer attached to a
+ * plane has changed or not since the last plane update. If &drm_plane_state.fb
+ * has changed, then &drm_plane_state.ignore_damage_clips must be set to true.
+ *
+ * That is because drivers with a per-buffer upload target expect the backing
+ * storage buffer to not change for a given plane. If the upload buffer changes
+ * between page flips, the new upload buffer has to be updated as a whole. This
+ * can be improved in the future if support for buffer damage is added to the
+ * DRM damage helpers, similarly to how user-space already handles this case,
+ * as explained in the following documents:
+ *
+ * https://registry.khronos.org/EGL/extensions/KHR/EGL_KHR_swap_buffers_with_damage.txt
+ * https://emersion.fr/blog/2019/intro-to-damage-tracking/
*/
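
A minimal sketch of the buffer-damage rule above, assuming a hypothetical driver (foo_*) with a per-buffer upload target and the usual atomic helpers:

    #include <drm/drm_atomic.h>
    #include <drm/drm_plane.h>

    static int foo_plane_atomic_check(struct drm_plane *plane,
                                      struct drm_atomic_state *state)
    {
            struct drm_plane_state *new_state =
                    drm_atomic_get_new_plane_state(state, plane);
            struct drm_plane_state *old_state =
                    drm_atomic_get_old_plane_state(state, plane);

            /* Damage accumulated against the old buffer is meaningless
             * once the framebuffer changes; force a full plane update. */
            if (old_state && new_state->fb != old_state->fb)
                    new_state->ignore_damage_clips = true;

            return 0;
    }
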
/**
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 5e95089676ff..7982be4b0306 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -279,35 +279,3 @@ void drm_plane_helper_destroy(struct drm_plane *plane)
kfree(plane);
}
EXPORT_SYMBOL(drm_plane_helper_destroy);
-
-/**
- * drm_plane_helper_atomic_check() - Helper to check plane atomic-state
- * @plane: plane to check
- * @state: atomic state object
- *
- * Provides a default plane-state check handler for planes whose atomic-state
- * scale and positioning are not expected to change since the plane is always
- * a fullscreen scanout buffer.
- *
- * This is often the case for the primary plane of simple framebuffers. See
- * also drm_crtc_helper_atomic_check() for the respective CRTC-state check
- * helper function.
- *
- * RETURNS:
- * Zero on success, or an errno code otherwise.
- */
-int drm_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct drm_crtc *new_crtc = new_plane_state->crtc;
- struct drm_crtc_state *new_crtc_state = NULL;
-
- if (new_crtc)
- new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
-
- return drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- false, false);
-}
-EXPORT_SYMBOL(drm_plane_helper_atomic_check);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 63b709a67471..834a5e28abbe 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -278,7 +278,7 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
-/*
+/**
* drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
* @dev: drm_device to import into
* @file_priv: drm file-private structure
@@ -292,9 +292,9 @@ EXPORT_SYMBOL(drm_gem_dmabuf_release);
*
* Returns 0 on success or a negative error code on failure.
*/
-static int drm_gem_prime_fd_to_handle(struct drm_device *dev,
- struct drm_file *file_priv, int prime_fd,
- uint32_t *handle)
+int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv, int prime_fd,
+ uint32_t *handle)
{
struct dma_buf *dma_buf;
struct drm_gem_object *obj;
@@ -360,6 +360,7 @@ out_put:
dma_buf_put(dma_buf);
return ret;
}
+EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -408,7 +409,7 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
return dmabuf;
}
-/*
+/**
* drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
* @dev: dev to export the buffer from
* @file_priv: drm file-private structure
@@ -421,10 +422,10 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
* The actual exporting from GEM object to a dma-buf is done through the
* &drm_gem_object_funcs.export callback.
*/
-static int drm_gem_prime_handle_to_fd(struct drm_device *dev,
- struct drm_file *file_priv, uint32_t handle,
- uint32_t flags,
- int *prime_fd)
+int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle,
+ uint32_t flags,
+ int *prime_fd)
{
struct drm_gem_object *obj;
int ret = 0;
@@ -506,6 +507,7 @@ out_unlock:
return ret;
}
+EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -864,9 +866,9 @@ EXPORT_SYMBOL(drm_prime_get_contiguous_size);
* @obj: GEM object to export
* @flags: flags like DRM_CLOEXEC and DRM_RDWR
*
- * This is the implementation of the &drm_gem_object_funcs.export functions
- * for GEM drivers using the PRIME helpers. It is used as the default for
- * drivers that do not set their own.
+ * This is the implementation of the &drm_gem_object_funcs.export function for GEM drivers
+ * using the PRIME helpers. It is used as the default in
+ * drm_gem_prime_handle_to_fd().
*/
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
int flags)
@@ -962,9 +964,10 @@ EXPORT_SYMBOL(drm_gem_prime_import_dev);
* @dev: drm_device to import into
* @dma_buf: dma-buf object to import
*
- * This is the implementation of the gem_prime_import functions for GEM
- * drivers using the PRIME helpers. It is the default for drivers that do
- * not set their own &drm_driver.gem_prime_import.
+ * This is the implementation of the gem_prime_import function for GEM drivers
+ * using the PRIME helpers. Drivers can use this as their
+ * &drm_driver.gem_prime_import implementation. It is used as the default
+ * implementation in drm_gem_prime_fd_to_handle().
*
* Drivers must arrange to call drm_prime_gem_destroy() from their
* &drm_gem_object_funcs.free hook when using this function.
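
With the helpers exported again, drivers can call them directly for driver-internal handle/fd conversion. A hedged sketch (foo_export_bo() is invented):

    /* Turn a GEM handle owned by file_priv into a dma-buf fd. */
    static int foo_export_bo(struct drm_device *dev, struct drm_file *file_priv,
                             u32 handle, int *fd_out)
    {
            return drm_gem_prime_handle_to_fd(dev, file_priv, handle,
                                              DRM_CLOEXEC | DRM_RDWR, fd_out);
    }
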
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
deleted file mode 100644
index f4e6184d1877..000000000000
--- a/drivers/gpu/drm/drm_scatter.c
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * \file drm_scatter.c
- * IOCTLs to manage scatter/gather memory
- *
- * \author Gareth Hughes <[email protected]>
- */
-
-/*
- * Created: Mon Dec 18 23:20:54 2000 by [email protected]
- *
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include <drm/drm.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_print.h>
-
-#include "drm_legacy.h"
-
-#define DEBUG_SCATTER 0
-
-static void drm_sg_cleanup(struct drm_sg_mem * entry)
-{
- struct page *page;
- int i;
-
- for (i = 0; i < entry->pages; i++) {
- page = entry->pagelist[i];
- if (page)
- ClearPageReserved(page);
- }
-
- vfree(entry->virtual);
-
- kfree(entry->busaddr);
- kfree(entry->pagelist);
- kfree(entry);
-}
-
-void drm_legacy_sg_cleanup(struct drm_device *dev)
-{
- if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
- drm_core_check_feature(dev, DRIVER_LEGACY)) {
- drm_sg_cleanup(dev->sg);
- dev->sg = NULL;
- }
-}
-#ifdef _LP64
-# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
-#else
-# define ScatterHandle(x) (unsigned int)(x)
-#endif
-
-int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_scatter_gather *request = data;
- struct drm_sg_mem *entry;
- unsigned long pages, i, j;
-
- DRM_DEBUG("\n");
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (!drm_core_check_feature(dev, DRIVER_SG))
- return -EOPNOTSUPP;
-
- if (request->size > SIZE_MAX - PAGE_SIZE)
- return -EINVAL;
-
- if (dev->sg)
- return -EINVAL;
-
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
- DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
-
- entry->pages = pages;
- entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL);
- if (!entry->pagelist) {
- kfree(entry);
- return -ENOMEM;
- }
-
- entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL);
- if (!entry->busaddr) {
- kfree(entry->pagelist);
- kfree(entry);
- return -ENOMEM;
- }
-
- entry->virtual = vmalloc_32(pages << PAGE_SHIFT);
- if (!entry->virtual) {
- kfree(entry->busaddr);
- kfree(entry->pagelist);
- kfree(entry);
- return -ENOMEM;
- }
-
- /* This also forces the mapping of COW pages, so our page list
- * will be valid. Please don't remove it...
- */
- memset(entry->virtual, 0, pages << PAGE_SHIFT);
-
- entry->handle = ScatterHandle((unsigned long)entry->virtual);
-
- DRM_DEBUG("handle = %08lx\n", entry->handle);
- DRM_DEBUG("virtual = %p\n", entry->virtual);
-
- for (i = (unsigned long)entry->virtual, j = 0; j < pages;
- i += PAGE_SIZE, j++) {
- entry->pagelist[j] = vmalloc_to_page((void *)i);
- if (!entry->pagelist[j])
- goto failed;
- SetPageReserved(entry->pagelist[j]);
- }
-
- request->handle = entry->handle;
-
- dev->sg = entry;
-
-#if DEBUG_SCATTER
- /* Verify that each page points to its virtual address, and vice
- * versa.
- */
- {
- int error = 0;
-
- for (i = 0; i < pages; i++) {
- unsigned long *tmp;
-
- tmp = page_address(entry->pagelist[i]);
- for (j = 0;
- j < PAGE_SIZE / sizeof(unsigned long);
- j++, tmp++) {
- *tmp = 0xcafebabe;
- }
- tmp = (unsigned long *)((u8 *) entry->virtual +
- (PAGE_SIZE * i));
- for (j = 0;
- j < PAGE_SIZE / sizeof(unsigned long);
- j++, tmp++) {
- if (*tmp != 0xcafebabe && error == 0) {
- error = 1;
- DRM_ERROR("Scatter allocation error, "
- "pagelist does not match "
- "virtual mapping\n");
- }
- }
- tmp = page_address(entry->pagelist[i]);
- for (j = 0;
- j < PAGE_SIZE / sizeof(unsigned long);
- j++, tmp++) {
- *tmp = 0;
- }
- }
- if (error == 0)
- DRM_ERROR("Scatter allocation matches pagelist\n");
- }
-#endif
-
- return 0;
-
- failed:
- drm_sg_cleanup(entry);
- return -ENOMEM;
-}
-
-int drm_legacy_sg_free(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_scatter_gather *request = data;
- struct drm_sg_mem *entry;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (!drm_core_check_feature(dev, DRIVER_SG))
- return -EOPNOTSUPP;
-
- entry = dev->sg;
- dev->sg = NULL;
-
- if (!entry || entry->handle != request->handle)
- return -EINVAL;
-
- DRM_DEBUG("virtual = %p\n", entry->virtual);
-
- drm_sg_cleanup(entry);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 01da6789d044..cbb65b7ba425 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -126,6 +126,11 @@
* synchronize between the two.
* This requirement is inherited from the Vulkan fence API.
*
+ * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE is set, the ioctl will also set
+ * a fence deadline hint on the backing fences before waiting, to provide the
+ * fence signaler with an appropriate sense of urgency. The deadline is
+ * specified as an absolute &CLOCK_MONOTONIC value in units of ns.
+ *
* Similarly, &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT takes an array of syncobj
* handles as well as an array of u64 points and does a host-side wait on all
* of syncobj fences at the given points simultaneously.
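
A hedged userspace sketch of the new deadline wait (wait_all_with_deadline() is invented; DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE and drm_syncobj_wait.deadline_nsec come from the uapi headers added by this series). The deadline is an absolute CLOCK_MONOTONIC timestamp in nanoseconds:

    #include <stdint.h>
    #include <xf86drm.h>

    static int wait_all_with_deadline(int fd, const uint32_t *handles,
                                      uint32_t count, uint64_t deadline_ns)
    {
            struct drm_syncobj_wait wait = {
                    .handles = (uint64_t)(uintptr_t)handles,
                    .count_handles = count,
                    .timeout_nsec = INT64_MAX,    /* no timeout of our own */
                    .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
                             DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE,
                    .deadline_nsec = deadline_ns,
            };

            return drmIoctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
    }
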
@@ -1027,7 +1032,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
uint32_t count,
uint32_t flags,
signed long timeout,
- uint32_t *idx)
+ uint32_t *idx,
+ ktime_t *deadline)
{
struct syncobj_wait_entry *entries;
struct dma_fence *fence;
@@ -1108,6 +1114,15 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
}
+ if (deadline) {
+ for (i = 0; i < count; ++i) {
+ fence = entries[i].fence;
+ if (!fence)
+ continue;
+ dma_fence_set_deadline(fence, *deadline);
+ }
+ }
+
do {
set_current_state(TASK_INTERRUPTIBLE);
@@ -1206,7 +1221,8 @@ static int drm_syncobj_array_wait(struct drm_device *dev,
struct drm_file *file_private,
struct drm_syncobj_wait *wait,
struct drm_syncobj_timeline_wait *timeline_wait,
- struct drm_syncobj **syncobjs, bool timeline)
+ struct drm_syncobj **syncobjs, bool timeline,
+ ktime_t *deadline)
{
signed long timeout = 0;
uint32_t first = ~0;
@@ -1217,7 +1233,8 @@ static int drm_syncobj_array_wait(struct drm_device *dev,
NULL,
wait->count_handles,
wait->flags,
- timeout, &first);
+ timeout, &first,
+ deadline);
if (timeout < 0)
return timeout;
wait->first_signaled = first;
@@ -1227,7 +1244,8 @@ static int drm_syncobj_array_wait(struct drm_device *dev,
u64_to_user_ptr(timeline_wait->points),
timeline_wait->count_handles,
timeline_wait->flags,
- timeout, &first);
+ timeout, &first,
+ deadline);
if (timeout < 0)
return timeout;
timeline_wait->first_signaled = first;
@@ -1298,17 +1316,22 @@ drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
{
struct drm_syncobj_wait *args = data;
struct drm_syncobj **syncobjs;
+ unsigned int possible_flags;
+ ktime_t t, *tp = NULL;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -EOPNOTSUPP;
- if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
- DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
+ possible_flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE;
+
+ if (args->flags & ~possible_flags)
return -EINVAL;
if (args->count_handles == 0)
- return -EINVAL;
+ return 0;
ret = drm_syncobj_array_find(file_private,
u64_to_user_ptr(args->handles),
@@ -1317,8 +1340,13 @@ drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
if (ret < 0)
return ret;
+ if (args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE) {
+ t = ns_to_ktime(args->deadline_nsec);
+ tp = &t;
+ }
+
ret = drm_syncobj_array_wait(dev, file_private,
- args, NULL, syncobjs, false);
+ args, NULL, syncobjs, false, tp);
drm_syncobj_array_free(syncobjs, args->count_handles);
@@ -1331,18 +1359,23 @@ drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
{
struct drm_syncobj_timeline_wait *args = data;
struct drm_syncobj **syncobjs;
+ unsigned int possible_flags;
+ ktime_t t, *tp = NULL;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
return -EOPNOTSUPP;
- if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
- DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
- DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
+ possible_flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE;
+
+ if (args->flags & ~possible_flags)
return -EINVAL;
if (args->count_handles == 0)
- return -EINVAL;
+ return 0;
ret = drm_syncobj_array_find(file_private,
u64_to_user_ptr(args->handles),
@@ -1351,8 +1384,13 @@ drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
if (ret < 0)
return ret;
+ if (args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE) {
+ t = ns_to_ktime(args->deadline_nsec);
+ tp = &t;
+ }
+
ret = drm_syncobj_array_wait(dev, file_private,
- NULL, args, syncobjs, true);
+ NULL, args, syncobjs, true, tp);
drm_syncobj_array_free(syncobjs, args->count_handles);
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 877e2067534f..702a12bc93bd 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -210,11 +210,6 @@ static u32 __get_vblank_counter(struct drm_device *dev, unsigned int pipe)
if (crtc->funcs->get_vblank_counter)
return crtc->funcs->get_vblank_counter(crtc);
}
-#ifdef CONFIG_DRM_LEGACY
- else if (dev->driver->get_vblank_counter) {
- return dev->driver->get_vblank_counter(dev, pipe);
- }
-#endif
return drm_vblank_no_hw_counter(dev, pipe);
}
@@ -433,11 +428,6 @@ static void __disable_vblank(struct drm_device *dev, unsigned int pipe)
if (crtc->funcs->disable_vblank)
crtc->funcs->disable_vblank(crtc);
}
-#ifdef CONFIG_DRM_LEGACY
- else {
- dev->driver->disable_vblank(dev, pipe);
- }
-#endif
}
/*
@@ -1151,11 +1141,6 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe)
if (crtc->funcs->enable_vblank)
return crtc->funcs->enable_vblank(crtc);
}
-#ifdef CONFIG_DRM_LEGACY
- else if (dev->driver->enable_vblank) {
- return dev->driver->enable_vblank(dev, pipe);
- }
-#endif
return -EINVAL;
}
@@ -1574,88 +1559,6 @@ void drm_crtc_vblank_restore(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_crtc_vblank_restore);
-static void drm_legacy_vblank_pre_modeset(struct drm_device *dev,
- unsigned int pipe)
-{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
-
- /* vblank is not initialized (IRQ not installed ?), or has been freed */
- if (!drm_dev_has_vblank(dev))
- return;
-
- if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
- return;
-
- /*
- * To avoid all the problems that might happen if interrupts
- * were enabled/disabled around or between these calls, we just
- * have the kernel take a reference on the CRTC (just once though
- * to avoid corrupting the count if multiple, mismatched calls occur),
- * so that interrupts remain enabled in the interim.
- */
- if (!vblank->inmodeset) {
- vblank->inmodeset = 0x1;
- if (drm_vblank_get(dev, pipe) == 0)
- vblank->inmodeset |= 0x2;
- }
-}
-
-static void drm_legacy_vblank_post_modeset(struct drm_device *dev,
- unsigned int pipe)
-{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
-
- /* vblank is not initialized (IRQ not installed ?), or has been freed */
- if (!drm_dev_has_vblank(dev))
- return;
-
- if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
- return;
-
- if (vblank->inmodeset) {
- spin_lock_irq(&dev->vbl_lock);
- drm_reset_vblank_timestamp(dev, pipe);
- spin_unlock_irq(&dev->vbl_lock);
-
- if (vblank->inmodeset & 0x2)
- drm_vblank_put(dev, pipe);
-
- vblank->inmodeset = 0;
- }
-}
-
-int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_modeset_ctl *modeset = data;
- unsigned int pipe;
-
- /* If drm_vblank_init() hasn't been called yet, just no-op */
- if (!drm_dev_has_vblank(dev))
- return 0;
-
- /* KMS drivers handle this internally */
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return 0;
-
- pipe = modeset->crtc;
- if (pipe >= dev->num_crtcs)
- return -EINVAL;
-
- switch (modeset->cmd) {
- case _DRM_PRE_MODESET:
- drm_legacy_vblank_pre_modeset(dev, pipe);
- break;
- case _DRM_POST_MODESET:
- drm_legacy_vblank_post_modeset(dev, pipe);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
u64 req_seq,
union drm_wait_vblank *vblwait,
@@ -1780,10 +1683,6 @@ static void drm_wait_vblank_reply(struct drm_device *dev, unsigned int pipe,
static bool drm_wait_vblank_supported(struct drm_device *dev)
{
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- if (unlikely(drm_core_check_feature(dev, DRIVER_LEGACY)))
- return dev->irq_enabled;
-#endif
return drm_dev_has_vblank(dev);
}
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
deleted file mode 100644
index 87c9fe55dec7..000000000000
--- a/drivers/gpu/drm/drm_vm.c
+++ /dev/null
@@ -1,665 +0,0 @@
-/*
- * \file drm_vm.c
- * Memory mapping for DRM
- *
- * \author Rickard E. (Rik) Faith <[email protected]>
- * \author Gareth Hughes <[email protected]>
- */
-
-/*
- * Created: Mon Jan 4 08:58:31 1999 by [email protected]
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/export.h>
-#include <linux/pci.h>
-#include <linux/seq_file.h>
-#include <linux/vmalloc.h>
-#include <linux/pgtable.h>
-
-#if defined(__ia64__)
-#include <linux/efi.h>
-#include <linux/slab.h>
-#endif
-#include <linux/mem_encrypt.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_framebuffer.h>
-#include <drm/drm_print.h>
-
-#include "drm_internal.h"
-#include "drm_legacy.h"
-
-struct drm_vma_entry {
- struct list_head head;
- struct vm_area_struct *vma;
- pid_t pid;
-};
-
-static void drm_vm_open(struct vm_area_struct *vma);
-static void drm_vm_close(struct vm_area_struct *vma);
-
-static pgprot_t drm_io_prot(struct drm_local_map *map,
- struct vm_area_struct *vma)
-{
- pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
-
-#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
- defined(__mips__) || defined(__loongarch__)
- if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
- tmp = pgprot_noncached(tmp);
- else
- tmp = pgprot_writecombine(tmp);
-#elif defined(__ia64__)
- if (efi_range_is_wc(vma->vm_start, vma->vm_end -
- vma->vm_start))
- tmp = pgprot_writecombine(tmp);
- else
- tmp = pgprot_noncached(tmp);
-#elif defined(__sparc__) || defined(__arm__)
- tmp = pgprot_noncached(tmp);
-#endif
- return tmp;
-}
-
-static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
-{
- pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
-
-#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
- tmp = pgprot_noncached_wc(tmp);
-#endif
- return tmp;
-}
-
-/*
- * \c fault method for AGP virtual memory.
- *
- * \param vma virtual memory area.
- * \param address access address.
- * \return pointer to the page structure.
- *
- * Find the right map and if it's AGP memory find the real physical page to
- * map, get the page, increment the use count and return it.
- */
-#if IS_ENABLED(CONFIG_AGP)
-static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_local_map *map = NULL;
- struct drm_map_list *r_list;
- struct drm_hash_item *hash;
-
- /*
- * Find the right map
- */
- if (!dev->agp)
- goto vm_fault_error;
-
- if (!dev->agp || !dev->agp->cant_use_aperture)
- goto vm_fault_error;
-
- if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
- goto vm_fault_error;
-
- r_list = drm_hash_entry(hash, struct drm_map_list, hash);
- map = r_list->map;
-
- if (map && map->type == _DRM_AGP) {
- /*
- * Using vm_pgoff as a selector forces us to use this unusual
- * addressing scheme.
- */
- resource_size_t offset = vmf->address - vma->vm_start;
- resource_size_t baddr = map->offset + offset;
- struct drm_agp_mem *agpmem;
- struct page *page;
-
-#ifdef __alpha__
- /*
- * Adjust to a bus-relative address
- */
- baddr -= dev->hose->mem_space->start;
-#endif
-
- /*
- * It's AGP memory - find the real physical page to map
- */
- list_for_each_entry(agpmem, &dev->agp->memory, head) {
- if (agpmem->bound <= baddr &&
- agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
- break;
- }
-
- if (&agpmem->head == &dev->agp->memory)
- goto vm_fault_error;
-
- /*
- * Get the page, inc the use count, and return it
- */
- offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
- page = agpmem->memory->pages[offset];
- get_page(page);
- vmf->page = page;
-
- DRM_DEBUG
- ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
- (unsigned long long)baddr,
- agpmem->memory->pages[offset],
- (unsigned long long)offset,
- page_count(page));
- return 0;
- }
-vm_fault_error:
- return VM_FAULT_SIGBUS; /* Disallow mremap */
-}
-#else
-static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
-{
- return VM_FAULT_SIGBUS;
-}
-#endif
-
-/*
- * \c nopage method for shared virtual memory.
- *
- * \param vma virtual memory area.
- * \param address access address.
- * \return pointer to the page structure.
- *
- * Get the mapping, find the real physical page to map, get the page, and
- * return it.
- */
-static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_local_map *map = vma->vm_private_data;
- unsigned long offset;
- unsigned long i;
- struct page *page;
-
- if (!map)
- return VM_FAULT_SIGBUS; /* Nothing allocated */
-
- offset = vmf->address - vma->vm_start;
- i = (unsigned long)map->handle + offset;
- page = vmalloc_to_page((void *)i);
- if (!page)
- return VM_FAULT_SIGBUS;
- get_page(page);
- vmf->page = page;
-
- DRM_DEBUG("shm_fault 0x%lx\n", offset);
- return 0;
-}
-
-/*
- * \c close method for shared virtual memory.
- *
- * \param vma virtual memory area.
- *
- * Deletes map information if we are the last
- * person to close a mapping and it's not in the global maplist.
- */
-static void drm_vm_shm_close(struct vm_area_struct *vma)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_vma_entry *pt, *temp;
- struct drm_local_map *map;
- struct drm_map_list *r_list;
- int found_maps = 0;
-
- DRM_DEBUG("0x%08lx,0x%08lx\n",
- vma->vm_start, vma->vm_end - vma->vm_start);
-
- map = vma->vm_private_data;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
- if (pt->vma->vm_private_data == map)
- found_maps++;
- if (pt->vma == vma) {
- list_del(&pt->head);
- kfree(pt);
- }
- }
-
- /* We were the only map that was found */
- if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
- /* Check to see if we are in the maplist, if we are not, then
- * we delete this mapping's information.
- */
- found_maps = 0;
- list_for_each_entry(r_list, &dev->maplist, head) {
- if (r_list->map == map)
- found_maps++;
- }
-
- if (!found_maps) {
- switch (map->type) {
- case _DRM_REGISTERS:
- case _DRM_FRAME_BUFFER:
- arch_phys_wc_del(map->mtrr);
- iounmap(map->handle);
- break;
- case _DRM_SHM:
- vfree(map->handle);
- break;
- case _DRM_AGP:
- case _DRM_SCATTER_GATHER:
- break;
- case _DRM_CONSISTENT:
- dma_free_coherent(dev->dev,
- map->size,
- map->handle,
- map->offset);
- break;
- }
- kfree(map);
- }
- }
- mutex_unlock(&dev->struct_mutex);
-}
-
-/*
- * \c fault method for DMA virtual memory.
- *
- * \param address access address.
- * \return pointer to the page structure.
- *
- * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
- */
-static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_device_dma *dma = dev->dma;
- unsigned long offset;
- unsigned long page_nr;
- struct page *page;
-
- if (!dma)
- return VM_FAULT_SIGBUS; /* Error */
- if (!dma->pagelist)
- return VM_FAULT_SIGBUS; /* Nothing allocated */
-
- offset = vmf->address - vma->vm_start;
- /* vm_[pg]off[set] should be 0 */
- page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
- page = virt_to_page((void *)dma->pagelist[page_nr]);
-
- get_page(page);
- vmf->page = page;
-
- DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
- return 0;
-}
-
-/*
- * \c fault method for scatter-gather virtual memory.
- *
- * \param address access address.
- * \return pointer to the page structure.
- *
- * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
- */
-static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_local_map *map = vma->vm_private_data;
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_sg_mem *entry = dev->sg;
- unsigned long offset;
- unsigned long map_offset;
- unsigned long page_offset;
- struct page *page;
-
- if (!entry)
- return VM_FAULT_SIGBUS; /* Error */
- if (!entry->pagelist)
- return VM_FAULT_SIGBUS; /* Nothing allocated */
-
- offset = vmf->address - vma->vm_start;
- map_offset = map->offset - (unsigned long)dev->sg->virtual;
- page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
- page = entry->pagelist[page_offset];
- get_page(page);
- vmf->page = page;
-
- return 0;
-}
-
-/** AGP virtual memory operations */
-static const struct vm_operations_struct drm_vm_ops = {
- .fault = drm_vm_fault,
- .open = drm_vm_open,
- .close = drm_vm_close,
-};
-
-/** Shared virtual memory operations */
-static const struct vm_operations_struct drm_vm_shm_ops = {
- .fault = drm_vm_shm_fault,
- .open = drm_vm_open,
- .close = drm_vm_shm_close,
-};
-
-/** DMA virtual memory operations */
-static const struct vm_operations_struct drm_vm_dma_ops = {
- .fault = drm_vm_dma_fault,
- .open = drm_vm_open,
- .close = drm_vm_close,
-};
-
-/** Scatter-gather virtual memory operations */
-static const struct vm_operations_struct drm_vm_sg_ops = {
- .fault = drm_vm_sg_fault,
- .open = drm_vm_open,
- .close = drm_vm_close,
-};
-
-static void drm_vm_open_locked(struct drm_device *dev,
- struct vm_area_struct *vma)
-{
- struct drm_vma_entry *vma_entry;
-
- DRM_DEBUG("0x%08lx,0x%08lx\n",
- vma->vm_start, vma->vm_end - vma->vm_start);
-
- vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
- if (vma_entry) {
- vma_entry->vma = vma;
- vma_entry->pid = current->pid;
- list_add(&vma_entry->head, &dev->vmalist);
- }
-}
-
-static void drm_vm_open(struct vm_area_struct *vma)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
-
- mutex_lock(&dev->struct_mutex);
- drm_vm_open_locked(dev, vma);
- mutex_unlock(&dev->struct_mutex);
-}
-
-static void drm_vm_close_locked(struct drm_device *dev,
- struct vm_area_struct *vma)
-{
- struct drm_vma_entry *pt, *temp;
-
- DRM_DEBUG("0x%08lx,0x%08lx\n",
- vma->vm_start, vma->vm_end - vma->vm_start);
-
- list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
- if (pt->vma == vma) {
- list_del(&pt->head);
- kfree(pt);
- break;
- }
- }
-}
-
-/*
- * \c close method for all virtual memory types.
- *
- * \param vma virtual memory area.
- *
- * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
- * free it.
- */
-static void drm_vm_close(struct vm_area_struct *vma)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
-
- mutex_lock(&dev->struct_mutex);
- drm_vm_close_locked(dev, vma);
- mutex_unlock(&dev->struct_mutex);
-}
-
-/*
- * mmap DMA memory.
- *
- * \param file_priv DRM file private.
- * \param vma virtual memory area.
- * \return zero on success or a negative number on failure.
- *
- * Sets the virtual memory area operations structure to vm_dma_ops, the file
- * pointer, and calls vm_open().
- */
-static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev;
- struct drm_device_dma *dma;
- unsigned long length = vma->vm_end - vma->vm_start;
-
- dev = priv->minor->dev;
- dma = dev->dma;
- DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
- vma->vm_start, vma->vm_end, vma->vm_pgoff);
-
- /* Length must match exact page count */
- if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
- return -EINVAL;
- }
-
- if (!capable(CAP_SYS_ADMIN) &&
- (dma->flags & _DRM_DMA_USE_PCI_RO)) {
- vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
-#if defined(__i386__) || defined(__x86_64__)
- pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
-#else
- /* Ye gads this is ugly. With more thought
- we could move this up higher and use
- `protection_map' instead. */
- vma->vm_page_prot =
- __pgprot(pte_val
- (pte_wrprotect
- (__pte(pgprot_val(vma->vm_page_prot)))));
-#endif
- }
-
- vma->vm_ops = &drm_vm_dma_ops;
-
- vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
-
- drm_vm_open_locked(dev, vma);
- return 0;
-}
-
-static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
-{
-#ifdef __alpha__
- return dev->hose->dense_mem_base;
-#else
- return 0;
-#endif
-}
-
-/*
- * mmap DRM memory.
- *
- * \param file_priv DRM file private.
- * \param vma virtual memory area.
- * \return zero on success or a negative number on failure.
- *
- * If the virtual memory area has no offset associated with it then it's a DMA
- * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
- * checks that the restricted flag is not set, sets the virtual memory operations
- * according to the mapping type and remaps the pages. Finally sets the file
- * pointer and calls vm_open().
- */
-static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_local_map *map = NULL;
- resource_size_t offset = 0;
- struct drm_hash_item *hash;
-
- DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
- vma->vm_start, vma->vm_end, vma->vm_pgoff);
-
- if (!priv->authenticated)
- return -EACCES;
-
- /* We check for "dma". On Apple's UniNorth, it's valid to have
- * the AGP mapped at physical address 0
- * --BenH.
- */
- if (!vma->vm_pgoff
-#if IS_ENABLED(CONFIG_AGP)
- && (!dev->agp
- || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
-#endif
- )
- return drm_mmap_dma(filp, vma);
-
- if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
- DRM_ERROR("Could not find map\n");
- return -EINVAL;
- }
-
- map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
- if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
- return -EPERM;
-
- /* Check for valid size. */
- if (map->size < vma->vm_end - vma->vm_start)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
- vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
-#if defined(__i386__) || defined(__x86_64__)
- pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
-#else
- /* Ye gads this is ugly. With more thought
- we could move this up higher and use
- `protection_map' instead. */
- vma->vm_page_prot =
- __pgprot(pte_val
- (pte_wrprotect
- (__pte(pgprot_val(vma->vm_page_prot)))));
-#endif
- }
-
- switch (map->type) {
-#if !defined(__arm__)
- case _DRM_AGP:
- if (dev->agp && dev->agp->cant_use_aperture) {
- /*
- * On some platforms we can't talk to bus dma address from the CPU, so for
- * memory of type DRM_AGP, we'll deal with sorting out the real physical
- * pages and mappings in fault()
- */
-#if defined(__powerpc__)
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#endif
- vma->vm_ops = &drm_vm_ops;
- break;
- }
- fallthrough; /* to _DRM_FRAME_BUFFER... */
-#endif
- case _DRM_FRAME_BUFFER:
- case _DRM_REGISTERS:
- offset = drm_core_get_reg_ofs(dev);
- vma->vm_page_prot = drm_io_prot(map, vma);
- if (io_remap_pfn_range(vma, vma->vm_start,
- (map->offset + offset) >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
- DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
- " offset = 0x%llx\n",
- map->type,
- vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
-
- vma->vm_ops = &drm_vm_ops;
- break;
- case _DRM_CONSISTENT:
- /* Consistent memory is really like shared memory. But
- * it's allocated in a different way, so avoid fault */
- if (remap_pfn_range(vma, vma->vm_start,
- page_to_pfn(virt_to_page(map->handle)),
- vma->vm_end - vma->vm_start, vma->vm_page_prot))
- return -EAGAIN;
- vma->vm_page_prot = drm_dma_prot(map->type, vma);
- fallthrough; /* to _DRM_SHM */
- case _DRM_SHM:
- vma->vm_ops = &drm_vm_shm_ops;
- vma->vm_private_data = (void *)map;
- break;
- case _DRM_SCATTER_GATHER:
- vma->vm_ops = &drm_vm_sg_ops;
- vma->vm_private_data = (void *)map;
- vma->vm_page_prot = drm_dma_prot(map->type, vma);
- break;
- default:
- return -EINVAL; /* This should never happen. */
- }
- vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
-
- drm_vm_open_locked(dev, vma);
- return 0;
-}
-
-int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
- int ret;
-
- if (drm_dev_is_unplugged(dev))
- return -ENODEV;
-
- mutex_lock(&dev->struct_mutex);
- ret = drm_mmap_locked(filp, vma);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_legacy_mmap);
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_vma_flush(struct drm_device *dev)
-{
- struct drm_vma_entry *vma, *vma_temp;
-
- /* Clear vma list (only needed for legacy drivers) */
- list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
- list_del(&vma->head);
- kfree(vma);
- }
-}
-#endif
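
The block removed above was the last user of the legacy per-device VMA bookkeeping: every mmap added an entry to drm_device::vmalist under struct_mutex, and every close walked the list with the _safe iterator so the matching entry could be unlinked and freed mid-walk. A minimal sketch of that pattern, with hypothetical names (my_dev, track_vma, untrack_vma), not the removed code itself:

#include <linux/list.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct my_dev {
        struct mutex lock;
        struct list_head vmalist;
};

struct vma_entry {
        struct list_head head;
        struct vm_area_struct *vma;
};

static void track_vma(struct my_dev *dev, struct vm_area_struct *vma)
{
        struct vma_entry *e = kmalloc(sizeof(*e), GFP_KERNEL);

        if (!e)
                return; /* tracking was best-effort in the old code, too */
        e->vma = vma;

        mutex_lock(&dev->lock);
        list_add(&e->head, &dev->vmalist);
        mutex_unlock(&dev->lock);
}

static void untrack_vma(struct my_dev *dev, struct vm_area_struct *vma)
{
        struct vma_entry *e, *tmp;

        mutex_lock(&dev->lock);
        list_for_each_entry_safe(e, tmp, &dev->vmalist, head) {
                if (e->vma == vma) {
                        list_del(&e->head); /* safe while iterating */
                        kfree(e);
                        break;
                }
        }
        mutex_unlock(&dev->lock);
}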
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 4d986077738b..776f2f0b602d 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -862,18 +862,16 @@ err_disable_pm_runtime:
return ret;
}
-static int exynos5433_decon_remove(struct platform_device *pdev)
+static void exynos5433_decon_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
component_del(&pdev->dev, &decon_component_ops);
-
- return 0;
}
struct platform_driver exynos5433_decon_driver = {
.probe = exynos5433_decon_probe,
- .remove = exynos5433_decon_remove,
+ .remove_new = exynos5433_decon_remove,
.driver = {
.name = "exynos5433-decon",
.pm = pm_ptr(&exynos5433_decon_pm_ops),
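
This hunk and most of the exynos hunks that follow apply one mechanical conversion: the platform bus core ignores the int returned by .remove, so drivers move to the void-returning .remove_new callback and drop their unconditional "return 0;". A sketch of the resulting shape, with hypothetical names (my_probe, my_remove, my_driver):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int my_probe(struct platform_device *pdev)
{
        pm_runtime_enable(&pdev->dev);
        return 0;
}

static void my_remove(struct platform_device *pdev)
{
        /* undo probe; nothing to report, the core would ignore it anyway */
        pm_runtime_disable(&pdev->dev);
}

static struct platform_driver my_driver = {
        .probe      = my_probe,
        .remove_new = my_remove,
        .driver = {
                .name = "my-driver",
        },
};
module_platform_driver(my_driver);

MODULE_LICENSE("GPL");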
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 0156a5e94435..0d185c0564b9 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -765,7 +765,7 @@ err_iounmap:
return ret;
}
-static int decon_remove(struct platform_device *pdev)
+static void decon_remove(struct platform_device *pdev)
{
struct decon_context *ctx = dev_get_drvdata(&pdev->dev);
@@ -774,8 +774,6 @@ static int decon_remove(struct platform_device *pdev)
iounmap(ctx->regs);
component_del(&pdev->dev, &decon_component_ops);
-
- return 0;
}
static int exynos7_decon_suspend(struct device *dev)
@@ -840,7 +838,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(exynos7_decon_pm_ops, exynos7_decon_suspend,
struct platform_driver decon_driver = {
.probe = decon_probe,
- .remove = decon_remove,
+ .remove_new = decon_remove,
.driver = {
.name = "exynos-decon",
.pm = pm_ptr(&exynos7_decon_pm_ops),
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 3404ec1367fb..ca31bad6c576 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -250,14 +250,12 @@ out:
return component_add(&pdev->dev, &exynos_dp_ops);
}
-static int exynos_dp_remove(struct platform_device *pdev)
+static void exynos_dp_remove(struct platform_device *pdev)
{
struct exynos_dp_device *dp = platform_get_drvdata(pdev);
component_del(&pdev->dev, &exynos_dp_ops);
analogix_dp_remove(dp->adp);
-
- return 0;
}
static int exynos_dp_suspend(struct device *dev)
@@ -285,7 +283,7 @@ MODULE_DEVICE_TABLE(of, exynos_dp_match);
struct platform_driver dp_driver = {
.probe = exynos_dp_probe,
- .remove = exynos_dp_remove,
+ .remove_new = exynos_dp_remove,
.driver = {
.name = "exynos-dp",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
index a971590b8132..e2c7373f20c6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -107,18 +107,16 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
return 0;
if (!priv->mapping) {
- void *mapping;
+ void *mapping = NULL;
if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
mapping = arm_iommu_create_mapping(&platform_bus_type,
EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
else if (IS_ENABLED(CONFIG_IOMMU_DMA))
mapping = iommu_get_domain_for_dev(priv->dma_dev);
- else
- mapping = ERR_PTR(-ENODEV);
- if (IS_ERR(mapping))
- return PTR_ERR(mapping);
+ if (!mapping)
+ return -ENODEV;
priv->mapping = mapping;
}
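
After this change the mapping pointer starts out NULL and is only assigned by whichever IS_ENABLED() branch is compiled in, so a single NULL test covers the "no IOMMU support configured" case and the ERR_PTR() sentinel disappears. A generic sketch of the pattern, assuming two hypothetical backends that return NULL on failure:

static int pick_mapping(struct my_priv *priv)
{
        void *mapping = NULL;

        if (IS_ENABLED(CONFIG_BACKEND_A))
                mapping = backend_a_create();   /* hypothetical */
        else if (IS_ENABLED(CONFIG_BACKEND_B))
                mapping = backend_b_lookup();   /* hypothetical */

        if (!mapping)   /* neither backend built in, or lookup failed */
                return -ENODEV;

        priv->mapping = mapping;
        return 0;
}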
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 378e5381978f..0dc36df6ada3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -101,7 +101,7 @@ static int exynos_dpi_create_connector(struct drm_encoder *encoder)
ret = drm_connector_init(encoder->dev, connector,
&exynos_dpi_connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
+ DRM_MODE_CONNECTOR_DPI);
if (ret) {
DRM_DEV_ERROR(ctx->dev,
"failed to initialize connector with drm\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 8399256cb5c9..7c59e1164a48 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -300,6 +300,7 @@ err_mode_config_cleanup:
drm_mode_config_cleanup(drm);
exynos_drm_cleanup_dma(drm);
kfree(private);
+ dev_set_drvdata(dev, NULL);
err_free_drm:
drm_dev_put(drm);
@@ -313,6 +314,7 @@ static void exynos_drm_unbind(struct device *dev)
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
+ drm_atomic_helper_shutdown(drm);
component_unbind_all(drm->dev, drm);
drm_mode_config_cleanup(drm);
@@ -344,15 +346,23 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
match);
}
-static int exynos_drm_platform_remove(struct platform_device *pdev)
+static void exynos_drm_platform_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &exynos_drm_ops);
- return 0;
+}
+
+static void exynos_drm_platform_shutdown(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ if (drm)
+ drm_atomic_helper_shutdown(drm);
}
static struct platform_driver exynos_drm_platform_driver = {
.probe = exynos_drm_platform_probe,
- .remove = exynos_drm_platform_remove,
+ .remove_new = exynos_drm_platform_remove,
+ .shutdown = exynos_drm_platform_shutdown,
.driver = {
.name = "exynos-drm",
.pm = &exynos_drm_pm_ops,
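
The new .shutdown hook mirrors what unbind does for an orderly teardown: it quiesces the display with drm_atomic_helper_shutdown() at poweroff/kexec time. drvdata can legitimately be NULL here when binding failed or never ran, which is also why the error path above now clears it. A sketch with a hypothetical driver name:

#include <drm/drm_atomic_helper.h>
#include <linux/platform_device.h>

static void my_drm_shutdown(struct platform_device *pdev)
{
        struct drm_device *drm = platform_get_drvdata(pdev);

        if (drm)        /* nothing bound, nothing to quiesce */
                drm_atomic_helper_shutdown(drm);
}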
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 8de2714599fc..e81a576de398 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1367,7 +1367,7 @@ err_pm_dis:
return ret;
}
-static int fimc_remove(struct platform_device *pdev)
+static void fimc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fimc_context *ctx = get_fimc_context(dev);
@@ -1377,8 +1377,6 @@ static int fimc_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
fimc_put_clocks(ctx);
-
- return 0;
}
static int fimc_runtime_suspend(struct device *dev)
@@ -1410,7 +1408,7 @@ MODULE_DEVICE_TABLE(of, fimc_of_match);
struct platform_driver fimc_driver = {
.probe = fimc_probe,
- .remove = fimc_remove,
+ .remove_new = fimc_remove,
.driver = {
.of_match_table = fimc_of_match,
.name = "exynos-drm-fimc",
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 8dde7b1e9b35..a9f1c5c05894 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -1277,13 +1277,11 @@ err_disable_pm_runtime:
return ret;
}
-static int fimd_remove(struct platform_device *pdev)
+static void fimd_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
component_del(&pdev->dev, &fimd_component_ops);
-
- return 0;
}
static int exynos_fimd_suspend(struct device *dev)
@@ -1325,7 +1323,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(exynos_fimd_pm_ops, exynos_fimd_suspend,
struct platform_driver fimd_driver = {
.probe = fimd_probe,
- .remove = fimd_remove,
+ .remove_new = fimd_remove,
.driver = {
.name = "exynos4-fb",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 414e585ec7dd..f3138423612e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1530,7 +1530,7 @@ err_destroy_slab:
return ret;
}
-static int g2d_remove(struct platform_device *pdev)
+static void g2d_remove(struct platform_device *pdev)
{
struct g2d_data *g2d = platform_get_drvdata(pdev);
@@ -1545,8 +1545,6 @@ static int g2d_remove(struct platform_device *pdev)
g2d_fini_cmdlist(g2d);
destroy_workqueue(g2d->g2d_workq);
kmem_cache_destroy(g2d->runqueue_slab);
-
- return 0;
}
static int g2d_suspend(struct device *dev)
@@ -1609,7 +1607,7 @@ MODULE_DEVICE_TABLE(of, exynos_g2d_match);
struct platform_driver g2d_driver = {
.probe = g2d_probe,
- .remove = g2d_remove,
+ .remove_new = g2d_remove,
.driver = {
.name = "exynos-drm-g2d",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 34cdabc30b4f..e9a769590415 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -11,9 +11,10 @@
#include <linux/component.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <drm/drm_fourcc.h>
@@ -103,7 +104,7 @@ struct gsc_context {
unsigned int num_formats;
void __iomem *regs;
- const char **clk_names;
+ const char *const *clk_names;
struct clk *clocks[GSC_MAX_CLOCKS];
int num_clocks;
struct gsc_scaler sc;
@@ -1217,7 +1218,7 @@ static const unsigned int gsc_tiled_formats[] = {
static int gsc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct gsc_driverdata *driver_data;
+ const struct gsc_driverdata *driver_data;
struct exynos_drm_ipp_formats *formats;
struct gsc_context *ctx;
int num_formats, ret, i, j;
@@ -1226,7 +1227,7 @@ static int gsc_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
- driver_data = (struct gsc_driverdata *)of_device_get_match_data(dev);
+ driver_data = device_get_match_data(dev);
ctx->dev = dev;
ctx->num_clocks = driver_data->num_clocks;
ctx->clk_names = driver_data->clk_names;
@@ -1308,15 +1309,13 @@ err_pm_dis:
return ret;
}
-static int gsc_remove(struct platform_device *pdev)
+static void gsc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
component_del(dev, &gsc_component_ops);
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
-
- return 0;
}
static int __maybe_unused gsc_runtime_suspend(struct device *dev)
@@ -1421,7 +1420,7 @@ MODULE_DEVICE_TABLE(of, exynos_drm_gsc_of_match);
struct platform_driver gsc_driver = {
.probe = gsc_probe,
- .remove = gsc_remove,
+ .remove_new = gsc_remove,
.driver = {
.name = "exynos-drm-gsc",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 17bab5b1663f..e2920960180f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -442,7 +442,7 @@ err:
return ret;
}
-static int exynos_mic_remove(struct platform_device *pdev)
+static void exynos_mic_remove(struct platform_device *pdev)
{
struct exynos_mic *mic = platform_get_drvdata(pdev);
@@ -450,8 +450,6 @@ static int exynos_mic_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
drm_bridge_remove(&mic->bridge);
-
- return 0;
}
static const struct of_device_id exynos_mic_of_match[] = {
@@ -462,7 +460,7 @@ MODULE_DEVICE_TABLE(of, exynos_mic_of_match);
struct platform_driver mic_driver = {
.probe = exynos_mic_probe,
- .remove = exynos_mic_remove,
+ .remove_new = exynos_mic_remove,
.driver = {
.name = "exynos-mic",
.pm = pm_ptr(&exynos_mic_pm_ops),
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index ffb327c5139e..5f7516655b08 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -329,15 +329,13 @@ err_component:
return ret;
}
-static int rotator_remove(struct platform_device *pdev)
+static void rotator_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
component_del(dev, &rotator_component_ops);
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
-
- return 0;
}
static int rotator_runtime_suspend(struct device *dev)
@@ -453,7 +451,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(rotator_pm_ops, rotator_runtime_suspend,
struct platform_driver rotator_driver = {
.probe = rotator_probe,
- .remove = rotator_remove,
+ .remove_new = rotator_remove,
.driver = {
.name = "exynos-rotator",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index f2b8b09a6b4e..392f721f13ab 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -539,15 +539,13 @@ err_ippdrv_register:
return ret;
}
-static int scaler_remove(struct platform_device *pdev)
+static void scaler_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
component_del(dev, &scaler_component_ops);
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
-
- return 0;
}
static int clk_disable_unprepare_wrapper(struct clk *clk)
@@ -721,7 +719,7 @@ MODULE_DEVICE_TABLE(of, exynos_scaler_match);
struct platform_driver scaler_driver = {
.probe = scaler_probe,
- .remove = scaler_remove,
+ .remove_new = scaler_remove,
.driver = {
.name = "exynos-scaler",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index f5e1adfcaa51..00382f28748a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -462,7 +462,7 @@ static int vidi_probe(struct platform_device *pdev)
return component_add(dev, &vidi_component_ops);
}
-static int vidi_remove(struct platform_device *pdev)
+static void vidi_remove(struct platform_device *pdev)
{
struct vidi_context *ctx = platform_get_drvdata(pdev);
@@ -472,13 +472,11 @@ static int vidi_remove(struct platform_device *pdev)
}
component_del(&pdev->dev, &vidi_component_ops);
-
- return 0;
}
struct platform_driver vidi_driver = {
.probe = vidi_probe,
- .remove = vidi_remove,
+ .remove_new = vidi_remove,
.driver = {
.name = "exynos-drm-vidi",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index f3aaa4ea3e68..43bed6cbaaea 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1861,6 +1861,8 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
return ret;
crtc = exynos_drm_crtc_get_by_type(drm_dev, EXYNOS_DISPLAY_TYPE_HDMI);
+ if (IS_ERR(crtc))
+ return PTR_ERR(crtc);
crtc->pipe_clk = &hdata->phy_clk;
ret = hdmi_create_connector(encoder);
@@ -2067,7 +2069,7 @@ err_ddc:
return ret;
}
-static int hdmi_remove(struct platform_device *pdev)
+static void hdmi_remove(struct platform_device *pdev)
{
struct hdmi_context *hdata = platform_get_drvdata(pdev);
@@ -2090,8 +2092,6 @@ static int hdmi_remove(struct platform_device *pdev)
put_device(&hdata->ddc_adpt->dev);
mutex_destroy(&hdata->mutex);
-
- return 0;
}
static int __maybe_unused exynos_hdmi_suspend(struct device *dev)
@@ -2123,7 +2123,7 @@ static const struct dev_pm_ops exynos_hdmi_pm_ops = {
struct platform_driver hdmi_driver = {
.probe = hdmi_probe,
- .remove = hdmi_remove,
+ .remove_new = hdmi_remove,
.driver = {
.name = "exynos-hdmi",
.owner = THIS_MODULE,
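
The hdmi_bind() fix above is the standard error-pointer check: the CRTC lookup returns either a valid pointer or ERR_PTR(-errno), and the old code dereferenced it unconditionally. A self-contained sketch of the idiom, with hypothetical names (my_crtc, lookup_crtc):

#include <linux/err.h>
#include <linux/errno.h>

struct my_crtc {
        int pipe;
};

static struct my_crtc *lookup_crtc(void)
{
        return ERR_PTR(-ENODEV);        /* pretend the lookup failed */
}

static int bind_example(void)
{
        struct my_crtc *crtc = lookup_crtc();

        if (IS_ERR(crtc))
                return PTR_ERR(crtc);   /* propagate -errno untouched */

        crtc->pipe = 0;                 /* only now safe to dereference */
        return 0;
}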
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index b302392ff0d7..6822333fd0e6 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1258,13 +1258,11 @@ static int mixer_probe(struct platform_device *pdev)
return ret;
}
-static int mixer_remove(struct platform_device *pdev)
+static void mixer_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
component_del(&pdev->dev, &mixer_component_ops);
-
- return 0;
}
static int __maybe_unused exynos_mixer_suspend(struct device *dev)
@@ -1338,5 +1336,5 @@ struct platform_driver mixer_driver = {
.of_match_table = mixer_match_types,
},
.probe = mixer_probe,
- .remove = mixer_remove,
+ .remove_new = mixer_remove,
};
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 2d21930d5501..5b7162076850 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -24,7 +24,9 @@ config DRM_I915_DEBUG
select DEBUG_FS
select PREEMPT_COUNT
select I2C_CHARDEV
+ select REF_TRACKER
select STACKDEPOT
+ select STACKTRACE
select DRM_DP_AUX_CHARDEV
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
@@ -38,6 +40,7 @@ config DRM_I915_DEBUG
select DRM_I915_DEBUG_GEM_ONCE
select DRM_I915_DEBUG_MMIO
select DRM_I915_DEBUG_RUNTIME_PM
+ select DRM_I915_DEBUG_WAKEREF
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
select DRM_I915_SELFTEST
default n
@@ -231,7 +234,9 @@ config DRM_I915_DEBUG_RUNTIME_PM
bool "Enable extra state checking for runtime PM"
depends on DRM_I915
default n
+ select REF_TRACKER
select STACKDEPOT
+ select STACKTRACE
help
Choose this option to turn on extra state checking for the
runtime PM functionality. This may introduce overhead during
@@ -240,3 +245,16 @@ config DRM_I915_DEBUG_RUNTIME_PM
Recommended for driver developers only.
If in doubt, say "N"
+
+config DRM_I915_DEBUG_WAKEREF
+ bool "Enable extra tracking for wakerefs"
+ depends on DRM_I915
+ select REF_TRACKER
+ select STACKDEPOT
+ select STACKTRACE
+ help
+ Choose this option to turn on extra state checking and usage
+ tracking for the wakeref PM functionality. This may introduce
+ overhead during driver runtime.
+
+ If in doubt, say "N"
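
Both debug options above now select the generic REF_TRACKER/STACKDEPOT/STACKTRACE infrastructure, which i915 uses to record where each wakeref was taken and released. A minimal sketch of the library's lifecycle, assuming the three-argument ref_tracker_dir_init() of kernels in this range and a hypothetical "demo" consumer; error handling is trimmed to the essentials:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/ref_tracker.h>

static struct ref_tracker_dir demo_dir;

static int demo(void)
{
        struct ref_tracker *t = NULL;

        ref_tracker_dir_init(&demo_dir, 16, "demo"); /* 16-entry quarantine */

        if (ref_tracker_alloc(&demo_dir, &t, GFP_KERNEL))       /* "get" */
                return -ENOMEM;
        ref_tracker_free(&demo_dir, &t);                        /* "put" */

        ref_tracker_dir_print(&demo_dir, 16);   /* dump still-held refs */
        ref_tracker_dir_exit(&demo_dir);
        return 0;
}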
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 65e984242089..e777686190ca 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -280,6 +280,7 @@ i915-y += \
display/intel_dsb.o \
display/intel_dsb_buffer.o \
display/intel_fb.o \
+ display/intel_fb_bo.o \
display/intel_fb_pin.o \
display/intel_fbc.o \
display/intel_fdi.o \
@@ -318,7 +319,8 @@ i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
display/intel_opregion.o
i915-$(CONFIG_DRM_FBDEV_EMULATION) += \
- display/intel_fbdev.o
+ display/intel_fbdev.o \
+ display/intel_fbdev_fb.o
i915-$(CONFIG_DEBUG_FS) += \
display/intel_display_debugfs.o \
display/intel_display_debugfs_params.o \
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index b37c0d02d500..03e8fb6caa83 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -2477,7 +2477,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
* FIFO size is only half of the self
* refresh FIFO size on ILK/SNB.
*/
- if (DISPLAY_VER(dev_priv) <= 6)
+ if (DISPLAY_VER(dev_priv) < 7)
fifo_size /= 2;
}
@@ -2818,7 +2818,7 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
usable_level = dev_priv->display.wm.num_levels - 1;
/* ILK/SNB: LP2+ watermarks only w/o sprites */
- if (DISPLAY_VER(dev_priv) <= 6 && pipe_wm->sprites_enabled)
+ if (DISPLAY_VER(dev_priv) < 7 && pipe_wm->sprites_enabled)
usable_level = 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
@@ -2961,7 +2961,7 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
int last_enabled_level = num_levels - 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
- if ((DISPLAY_VER(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
+ if ((DISPLAY_VER(dev_priv) < 7 || IS_IVYBRIDGE(dev_priv)) &&
config->num_pipes_active > 1)
last_enabled_level = 0;
@@ -3060,7 +3060,7 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* Always set WM_LP_SPRITE_EN when spr_val != 0, even if the
* level is disabled. Doing otherwise could cause underruns.
*/
- if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) {
+ if (DISPLAY_VER(dev_priv) < 7 && r->spr_val) {
drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE;
}
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 481fcb650850..ac456a2275db 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1440,6 +1440,13 @@ static void gen11_dsi_post_disable(struct intel_atomic_state *state,
static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+ enum drm_mode_status status;
+
+ status = intel_cpu_transcoder_mode_valid(i915, mode);
+ if (status != MODE_OK)
+ return status;
+
/* FIXME: DSC? */
return intel_dsi_mode_valid(connector, mode);
}
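
gen11_dsi_mode_valid() is the first of several connector ->mode_valid() hooks in this series to gain the same preamble; the shared helper itself is split out of intel_mode_valid() in the intel_display.c hunk further down. The shape is always the same, sketched here for a hypothetical connector:

static enum drm_mode_status
my_connector_mode_valid(struct drm_connector *connector,
                        struct drm_display_mode *mode)
{
        struct drm_i915_private *i915 = to_i915(connector->dev);
        enum drm_mode_status status;

        /* global CPU transcoder timing limits first ... */
        status = intel_cpu_transcoder_mode_valid(i915, mode);
        if (status != MODE_OK)
                return status;

        /* ... then whatever this output type additionally forbids */
        return MODE_OK;
}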
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 2fd72b2fd109..3e7e96acb24a 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -2201,6 +2201,9 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
const u8 *ddc_pin_map;
int i, n_entries;
+ if (IS_DGFX(i915))
+ return vbt_pin;
+
if (INTEL_PCH_TYPE(i915) >= PCH_LNL || HAS_PCH_MTP(i915) ||
IS_ALDERLAKE_P(i915)) {
ddc_pin_map = adlp_ddc_pin_map;
@@ -2208,8 +2211,6 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
} else if (IS_ALDERLAKE_S(i915)) {
ddc_pin_map = adls_ddc_pin_map;
n_entries = ARRAY_SIZE(adls_ddc_pin_map);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
- return vbt_pin;
} else if (IS_ROCKETLAKE(i915) && INTEL_PCH_TYPE(i915) == PCH_TGP) {
ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index bef96db62c80..7f2a50b4f494 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -87,7 +87,8 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
return ret;
dclk = val & 0xffff;
- sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) > 11 ? 500 : 0), 1000);
+ sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) >= 12 ? 500 : 0),
+ 1000);
sp->t_rp = (val & 0xff0000) >> 16;
sp->t_rcd = (val & 0xff000000) >> 24;
@@ -480,7 +481,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
if (num_channels < qi.max_numchannels && DISPLAY_VER(dev_priv) >= 12)
qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1);
- if (DISPLAY_VER(dev_priv) > 11 && num_channels > qi.max_numchannels)
+ if (DISPLAY_VER(dev_priv) >= 12 && num_channels > qi.max_numchannels)
drm_warn(&dev_priv->drm, "Number of channels exceeds max number of channels.");
if (qi.max_numchannels != 0)
num_channels = min_t(u8, num_channels, qi.max_numchannels);
@@ -897,7 +898,7 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
unsigned int idx;
unsigned int max_data_rate;
- if (DISPLAY_VER(i915) > 11)
+ if (DISPLAY_VER(i915) >= 12)
idx = tgl_max_bw_index(i915, num_active_planes, i);
else
idx = icl_max_bw_index(i915, num_active_planes, i);
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index b93d1ad7936d..8bb6bab7c8cd 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -2597,7 +2597,7 @@ static int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state)
* Since PPC = 2 with bigjoiner
* => CDCLK >= compressed_bpp * Pixel clock / 2 * Bigjoiner Interface bits
*/
- int bigjoiner_interface_bits = DISPLAY_VER(i915) > 13 ? 36 : 24;
+ int bigjoiner_interface_bits = DISPLAY_VER(i915) >= 14 ? 36 : 24;
int min_cdclk_bj =
(to_bpp_int_roundup(crtc_state->dsc.compressed_bpp_x16) *
pixel_clock) / (2 * bigjoiner_interface_bits);
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 0e33a0523a75..abaacea5c2cc 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -348,8 +348,13 @@ intel_crt_mode_valid(struct drm_connector *connector,
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
int max_dotclk = dev_priv->max_dotclk_freq;
+ enum drm_mode_status status;
int max_clock;
+ status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
+ if (status != MODE_OK)
+ return status;
+
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 2d15e82c0b3d..49fd100ec98a 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -262,6 +262,15 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_dbg_kms(&i915->drm, "fec: %s, enhanced framing: %s\n",
str_enabled_disabled(pipe_config->fec_enable),
str_enabled_disabled(pipe_config->enhanced_framing));
+
+ drm_dbg_kms(&i915->drm, "sdp split: %s\n",
+ str_enabled_disabled(pipe_config->sdp_split_enable));
+
+ drm_dbg_kms(&i915->drm, "psr: %s, psr2: %s, panel replay: %s, selective fetch: %s\n",
+ str_enabled_disabled(pipe_config->has_psr),
+ str_enabled_disabled(pipe_config->has_psr2),
+ str_enabled_disabled(pipe_config->has_panel_replay),
+ str_enabled_disabled(pipe_config->enable_psr2_sel_fetch));
}
drm_dbg_kms(&i915->drm, "framestart delay: %d, MSA timing delay: %d\n",
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index b342fad180ca..926e2de00eb5 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -21,8 +21,11 @@
#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
#include "intel_psr.h"
+#include "intel_psr_regs.h"
#include "skl_watermark.h"
+#include "gem/i915_gem_object.h"
+
/* Cursor formats */
static const u32 intel_cursor_formats[] = {
DRM_FORMAT_ARGB8888,
@@ -33,11 +36,11 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
struct drm_i915_private *dev_priv =
to_i915(plane_state->uapi.plane->dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 base;
if (DISPLAY_INFO(dev_priv)->cursor_needs_physical)
- base = sg_dma_address(obj->mm.pages->sgl);
+ base = i915_gem_object_get_dma_address(obj, 0);
else
base = intel_plane_ggtt_offset(plane_state);
@@ -484,6 +487,35 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
return 0;
}
+static void i9xx_cursor_disable_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
+}
+
+static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ if (drm_rect_height(&plane_state->psr2_sel_fetch_area) > 0)
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
+ plane_state->ctl);
+ else
+ i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state);
+}
+
/* TODO: split into noarm+arm pair */
static void i9xx_cursor_update_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -531,10 +563,10 @@ static void i9xx_cursor_update_arm(struct intel_plane *plane,
skl_write_cursor_wm(plane, crtc_state);
if (plane_state)
- intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state,
- plane_state);
+ i9xx_cursor_update_sel_fetch_arm(plane, crtc_state,
+ plane_state);
else
- intel_psr2_disable_plane_sel_fetch_arm(plane, crtc_state);
+ i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state);
if (plane->cursor.base != base ||
plane->cursor.size != fbc_ctl ||
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index a8fa76580802..5fbec5784b83 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -415,9 +415,15 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_ddi_buf_trans *trans;
enum phy phy = intel_port_to_phy(i915, encoder->port);
- u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder);
+ u8 owned_lane_mask;
intel_wakeref_t wakeref;
int n_entries, ln;
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ if (intel_tc_port_in_tbt_alt_mode(dig_port))
+ return;
+
+ owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder);
wakeref = intel_cx0_phy_transaction_begin(encoder);
@@ -3136,6 +3142,9 @@ void intel_cx0pll_state_verify(struct intel_atomic_state *state,
encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
phy = intel_port_to_phy(i915, encoder->port);
+ if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder)))
+ return;
+
intel_cx0pll_readout_hw_state(encoder, &mpll_hw_state);
if (intel_is_c10phy(i915, phy))
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 5cf162628b95..d955957b7d18 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -2627,7 +2627,7 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
crtc_vblank_start = 1;
}
- if (DISPLAY_VER(dev_priv) > 3)
+ if (DISPLAY_VER(dev_priv) >= 4)
intel_de_write(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder),
vsyncshift);
@@ -3167,7 +3167,7 @@ static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state)
break;
case 36:
/* Port output 12BPC defined for ADLP+ */
- if (DISPLAY_VER(dev_priv) > 12)
+ if (DISPLAY_VER(dev_priv) >= 13)
val |= PIPE_MISC_BPC_12_ADLP;
break;
default:
@@ -3224,7 +3224,7 @@ int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc)
* MIPI DSI HW readout.
*/
case PIPE_MISC_BPC_12_ADLP:
- if (DISPLAY_VER(dev_priv) > 12)
+ if (DISPLAY_VER(dev_priv) >= 13)
return 36;
fallthrough;
default:
@@ -4923,6 +4923,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_X(name) do { \
if (current_config->name != pipe_config->name) { \
+ BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
+ __stringify(name) " is bool"); \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected 0x%08x, found 0x%08x)", \
current_config->name, \
@@ -4933,6 +4935,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
+ BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
+ __stringify(name) " is bool"); \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected 0x%08x, found 0x%08x)", \
current_config->name & (mask), \
@@ -4943,6 +4947,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_I(name) do { \
if (current_config->name != pipe_config->name) { \
+ BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
+ __stringify(name) " is bool"); \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected %i, found %i)", \
current_config->name, \
@@ -4953,6 +4959,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_BOOL(name) do { \
if (current_config->name != pipe_config->name) { \
+ BUILD_BUG_ON_MSG(!__same_type(current_config->name, bool), \
+ __stringify(name) " is not bool"); \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected %s, found %s)", \
str_yes_no(current_config->name), \
@@ -5091,8 +5099,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_QUIRK(quirk) \
((current_config->quirks | pipe_config->quirks) & (quirk))
- PIPE_CONF_CHECK_I(hw.enable);
- PIPE_CONF_CHECK_I(hw.active);
+ PIPE_CONF_CHECK_BOOL(hw.enable);
+ PIPE_CONF_CHECK_BOOL(hw.active);
PIPE_CONF_CHECK_I(cpu_transcoder);
PIPE_CONF_CHECK_I(mst_master_transcoder);
@@ -5301,8 +5309,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(dsc.config.second_line_bpg_offset);
PIPE_CONF_CHECK_I(dsc.config.nsl_bpg_offset);
- PIPE_CONF_CHECK_I(dsc.compression_enable);
- PIPE_CONF_CHECK_I(dsc.dsc_split);
+ PIPE_CONF_CHECK_BOOL(dsc.compression_enable);
+ PIPE_CONF_CHECK_BOOL(dsc.dsc_split);
PIPE_CONF_CHECK_I(dsc.compressed_bpp_x16);
PIPE_CONF_CHECK_BOOL(splitter.enable);
@@ -7354,7 +7362,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
intel_color_cleanup_commit(new_crtc_state);
- drm_atomic_helper_cleanup_planes(dev, &state->base);
+ drm_atomic_helper_unprepare_planes(dev, &state->base);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
}
@@ -7734,6 +7742,16 @@ enum drm_mode_status intel_mode_valid(struct drm_device *dev,
mode->vtotal > vtotal_max)
return MODE_V_ILLEGAL;
+ return MODE_OK;
+}
+
+enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *dev_priv,
+ const struct drm_display_mode *mode)
+{
+ /*
+ * Additional transcoder timing limits,
+ * excluding BXT/GLK DSI transcoders.
+ */
if (DISPLAY_VER(dev_priv) >= 5) {
if (mode->hdisplay < 64 ||
mode->htotal - mode->hdisplay < 32)
@@ -7753,7 +7771,7 @@ enum drm_mode_status intel_mode_valid(struct drm_device *dev,
* Cantiga+ cannot handle modes with a hsync front porch of 0.
* WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
*/
- if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
+ if ((DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) &&
mode->hsync_start == mode->hdisplay)
return MODE_H_ILLEGAL;
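
The BUILD_BUG_ON_MSG() guards added to the PIPE_CONF_CHECK_* macros above rely on __same_type(), a wrapper around __builtin_types_compatible_p(), so passing a bool field to an integer-comparison macro now fails the build instead of producing confusing "expected 0x00000001" mismatch logs. The same trick compiles standalone with gcc or clang; cfg and CHECK_I are hypothetical stand-ins:

#include <stdbool.h>

struct cfg { int bpp; bool enable; };

#define CHECK_I(c, field) do {                                          \
        _Static_assert(!__builtin_types_compatible_p(typeof((c)->field),\
                                                     bool),             \
                       #field " is bool - use CHECK_BOOL()");           \
        /* the real macro compares values here */                       \
} while (0)

int main(void)
{
        struct cfg cur = { .bpp = 24, .enable = true };

        CHECK_I(&cur, bpp);     /* int field: builds fine */
        /* CHECK_I(&cur, enable);  bool field: fails at compile time */
        (void)cur;
        return 0;
}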
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 8548f49e3972..f4a0773f0fca 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -402,6 +402,9 @@ enum drm_mode_status
intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
const struct drm_display_mode *mode,
bool bigjoiner);
+enum drm_mode_status
+intel_cpu_transcoder_mode_valid(struct drm_i915_private *i915,
+ const struct drm_display_mode *mode);
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
bool is_trans_port_sync_master(const struct intel_crtc_state *state);
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 915420d0cef8..d951edb36687 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -1095,7 +1095,7 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
debugfs_create_file(intel_display_debugfs_files[i].name,
- S_IRUGO | S_IWUSR,
+ 0644,
minor->debugfs_root,
to_i915(minor->dev),
intel_display_debugfs_files[i].fops);
@@ -1116,11 +1116,10 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
static int i915_panel_show(struct seq_file *m, void *data)
{
- struct drm_connector *connector = m->private;
- struct intel_dp *intel_dp =
- intel_attached_dp(to_intel_connector(connector));
+ struct intel_connector *connector = m->private;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
- if (connector->status != connector_status_connected)
+ if (connector->base.status != connector_status_connected)
return -ENODEV;
seq_printf(m, "Panel power up delay: %d\n",
@@ -1138,23 +1137,23 @@ DEFINE_SHOW_ATTRIBUTE(i915_panel);
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
- struct drm_connector *connector = m->private;
- struct drm_i915_private *i915 = to_i915(connector->dev);
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_connector *connector = m->private;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
int ret;
ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
if (ret)
return ret;
- if (!connector->encoder || connector->status != connector_status_connected) {
+ if (!connector->base.encoder ||
+ connector->base.status != connector_status_connected) {
ret = -ENODEV;
goto out;
}
- seq_printf(m, "%s:%d HDCP version: ", connector->name,
- connector->base.id);
- intel_hdcp_info(m, intel_connector);
+ seq_printf(m, "%s:%d HDCP version: ", connector->base.name,
+ connector->base.base.id);
+ intel_hdcp_info(m, connector);
out:
drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
@@ -1165,16 +1164,16 @@ DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
static int i915_lpsp_capability_show(struct seq_file *m, void *data)
{
- struct drm_connector *connector = m->private;
- struct drm_i915_private *i915 = to_i915(connector->dev);
- struct intel_encoder *encoder;
+ struct intel_connector *connector = m->private;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ int connector_type = connector->base.connector_type;
bool lpsp_capable = false;
- encoder = intel_attached_encoder(to_intel_connector(connector));
if (!encoder)
return -ENODEV;
- if (connector->status != connector_status_connected)
+ if (connector->base.status != connector_status_connected)
return -ENODEV;
if (DISPLAY_VER(i915) >= 13)
@@ -1187,15 +1186,15 @@ static int i915_lpsp_capability_show(struct seq_file *m, void *data)
*/
lpsp_capable = encoder->port <= PORT_B;
else if (DISPLAY_VER(i915) == 11)
- lpsp_capable = (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP);
+ lpsp_capable = (connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector_type == DRM_MODE_CONNECTOR_eDP);
else if (IS_DISPLAY_VER(i915, 9, 10))
lpsp_capable = (encoder->port == PORT_A &&
- (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
- connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
+ (connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector_type == DRM_MODE_CONNECTOR_DisplayPort));
else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
- lpsp_capable = connector->connector_type == DRM_MODE_CONNECTOR_eDP;
+ lpsp_capable = connector_type == DRM_MODE_CONNECTOR_eDP;
seq_printf(m, "LPSP: %s\n", lpsp_capable ? "capable" : "incapable");
@@ -1205,7 +1204,7 @@ DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
- struct intel_connector *connector = to_intel_connector(m->private);
+ struct intel_connector *connector = m->private;
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct drm_crtc *crtc;
struct intel_dp *intel_dp;
@@ -1275,13 +1274,13 @@ static ssize_t i915_dsc_fec_support_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
{
+ struct seq_file *m = file->private_data;
+ struct intel_connector *connector = m->private;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
bool dsc_enable = false;
int ret;
- struct drm_connector *connector =
- ((struct seq_file *)file->private_data)->private;
- struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (len == 0)
return 0;
@@ -1319,22 +1318,22 @@ static const struct file_operations i915_dsc_fec_support_fops = {
static int i915_dsc_bpc_show(struct seq_file *m, void *data)
{
- struct drm_connector *connector = m->private;
- struct drm_device *dev = connector->dev;
+ struct intel_connector *connector = m->private;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
struct drm_crtc *crtc;
struct intel_crtc_state *crtc_state;
- struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
int ret;
if (!encoder)
return -ENODEV;
- ret = drm_modeset_lock_single_interruptible(&dev->mode_config.connection_mutex);
+ ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
if (ret)
return ret;
- crtc = connector->state->crtc;
- if (connector->status != connector_status_connected || !crtc) {
+ crtc = connector->base.state->crtc;
+ if (connector->base.status != connector_status_connected || !crtc) {
ret = -ENODEV;
goto out;
}
@@ -1342,7 +1341,7 @@ static int i915_dsc_bpc_show(struct seq_file *m, void *data)
crtc_state = to_intel_crtc_state(crtc->state);
seq_printf(m, "Input_BPC: %d\n", crtc_state->dsc.config.bits_per_component);
-out: drm_modeset_unlock(&dev->mode_config.connection_mutex);
+out: drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
return ret;
}
@@ -1351,9 +1350,9 @@ static ssize_t i915_dsc_bpc_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
{
- struct drm_connector *connector =
- ((struct seq_file *)file->private_data)->private;
- struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
+ struct seq_file *m = file->private_data;
+ struct intel_connector *connector = m->private;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int dsc_bpc = 0;
int ret;
@@ -1385,22 +1384,22 @@ static const struct file_operations i915_dsc_bpc_fops = {
static int i915_dsc_output_format_show(struct seq_file *m, void *data)
{
- struct drm_connector *connector = m->private;
- struct drm_device *dev = connector->dev;
+ struct intel_connector *connector = m->private;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
struct drm_crtc *crtc;
struct intel_crtc_state *crtc_state;
- struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
int ret;
if (!encoder)
return -ENODEV;
- ret = drm_modeset_lock_single_interruptible(&dev->mode_config.connection_mutex);
+ ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
if (ret)
return ret;
- crtc = connector->state->crtc;
- if (connector->status != connector_status_connected || !crtc) {
+ crtc = connector->base.state->crtc;
+ if (connector->base.status != connector_status_connected || !crtc) {
ret = -ENODEV;
goto out;
}
@@ -1409,7 +1408,7 @@ static int i915_dsc_output_format_show(struct seq_file *m, void *data)
seq_printf(m, "DSC_Output_Format: %s\n",
intel_output_format_name(crtc_state->output_format));
-out: drm_modeset_unlock(&dev->mode_config.connection_mutex);
+out: drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
return ret;
}
@@ -1418,9 +1417,9 @@ static ssize_t i915_dsc_output_format_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
{
- struct drm_connector *connector =
- ((struct seq_file *)file->private_data)->private;
- struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
+ struct seq_file *m = file->private_data;
+ struct intel_connector *connector = m->private;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int dsc_output_format = 0;
int ret;
@@ -1452,33 +1451,32 @@ static const struct file_operations i915_dsc_output_format_fops = {
static int i915_dsc_fractional_bpp_show(struct seq_file *m, void *data)
{
- struct drm_connector *connector = m->private;
- struct drm_device *dev = connector->dev;
+ struct intel_connector *connector = m->private;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
struct drm_crtc *crtc;
struct intel_dp *intel_dp;
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
int ret;
if (!encoder)
return -ENODEV;
- ret = drm_modeset_lock_single_interruptible(&dev->mode_config.connection_mutex);
+ ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
if (ret)
return ret;
- crtc = connector->state->crtc;
- if (connector->status != connector_status_connected || !crtc) {
+ crtc = connector->base.state->crtc;
+ if (connector->base.status != connector_status_connected || !crtc) {
ret = -ENODEV;
goto out;
}
- intel_dp = intel_attached_dp(intel_connector);
+ intel_dp = intel_attached_dp(connector);
seq_printf(m, "Force_DSC_Fractional_BPP_Enable: %s\n",
str_yes_no(intel_dp->force_dsc_fractional_bpp_en));
out:
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
return ret;
}
@@ -1487,10 +1485,10 @@ static ssize_t i915_dsc_fractional_bpp_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
{
- struct drm_connector *connector =
- ((struct seq_file *)file->private_data)->private;
- struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct seq_file *m = file->private_data;
+ struct intel_connector *connector = m->private;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
bool dsc_fractional_bpp_enable = false;
int ret;
@@ -1565,39 +1563,38 @@ DEFINE_SHOW_ATTRIBUTE(intel_crtc_pipe);
/**
* intel_connector_debugfs_add - add i915 specific connector debugfs files
- * @intel_connector: pointer to a registered drm_connector
+ * @connector: pointer to a registered intel_connector
*
* Cleanup will be done by drm_connector_unregister() through a call to
* drm_debugfs_connector_remove().
*/
-void intel_connector_debugfs_add(struct intel_connector *intel_connector)
+void intel_connector_debugfs_add(struct intel_connector *connector)
{
- struct drm_connector *connector = &intel_connector->base;
- struct dentry *root = connector->debugfs_entry;
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct dentry *root = connector->base.debugfs_entry;
+ int connector_type = connector->base.connector_type;
/* The connector must have been registered beforehands. */
if (!root)
return;
- intel_drrs_connector_debugfs_add(intel_connector);
- intel_psr_connector_debugfs_add(intel_connector);
+ intel_drrs_connector_debugfs_add(connector);
+ intel_psr_connector_debugfs_add(connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- debugfs_create_file("i915_panel_timings", S_IRUGO, root,
+ if (connector_type == DRM_MODE_CONNECTOR_eDP)
+ debugfs_create_file("i915_panel_timings", 0444, root,
connector, &i915_panel_fops);
- if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
- debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
+ if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+ debugfs_create_file("i915_hdcp_sink_capability", 0444, root,
connector, &i915_hdcp_sink_capability_fops);
}
- if (DISPLAY_VER(dev_priv) >= 11 &&
- ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
- !to_intel_connector(connector)->mst_port) ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ if (DISPLAY_VER(i915) >= 11 &&
+ ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !connector->mst_port) ||
+ connector_type == DRM_MODE_CONNECTOR_eDP)) {
debugfs_create_file("i915_dsc_fec_support", 0644, root,
connector, &i915_dsc_fec_support_fops);
@@ -1611,11 +1608,11 @@ void intel_connector_debugfs_add(struct intel_connector *intel_connector)
connector, &i915_dsc_fractional_bpp_fops);
}
- if (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
- connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
+ if (connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_HDMIB)
debugfs_create_file("i915_lpsp_capability", 0444, root,
connector, &i915_lpsp_capability_fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 4299cc452e05..79e9f1c3e241 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -49,7 +49,7 @@ struct drm_printer;
#define HAS_DSC(__i915) (DISPLAY_RUNTIME_INFO(__i915)->has_dsc)
#define HAS_FBC(i915) (DISPLAY_RUNTIME_INFO(i915)->fbc_mask != 0)
#define HAS_FPGA_DBG_UNCLAIMED(i915) (DISPLAY_INFO(i915)->has_fpga_dbg)
-#define HAS_FW_BLC(i915) (DISPLAY_VER(i915) > 2)
+#define HAS_FW_BLC(i915) (DISPLAY_VER(i915) >= 3)
#define HAS_GMBUS_IRQ(i915) (DISPLAY_VER(i915) >= 4)
#define HAS_GMBUS_BURST_READ(i915) (DISPLAY_VER(i915) >= 10 || IS_KABYLAKE(i915))
#define HAS_GMCH(i915) (DISPLAY_INFO(i915)->has_gmch)
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index bff4a76310c0..f8ed53f30b2e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -896,7 +896,7 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
}
if (!found)
- drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
+ drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir);
}
static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
@@ -1653,7 +1653,7 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
else if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_postinstall(dev_priv);
- if (DISPLAY_VER(dev_priv) <= 10)
+ if (DISPLAY_VER(dev_priv) < 11)
de_misc_masked |= GEN8_DE_MISC_GSE;
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index e390595d7341..5f091502719b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -405,7 +405,7 @@ print_async_put_domains_state(struct i915_power_domains *power_domains)
struct drm_i915_private,
display.power.domains);
- drm_dbg(&i915->drm, "async_put_wakeref %u\n",
+ drm_dbg(&i915->drm, "async_put_wakeref %lu\n",
power_domains->async_put_wakeref);
print_power_domains(power_domains, "async_put_domains[0]",
@@ -1697,14 +1697,14 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
if (resume)
intel_dmc_load_program(dev_priv);
- /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
- if (DISPLAY_VER(dev_priv) >= 12)
+ /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p,dg2 */
+ if (IS_DISPLAY_IP_RANGE(dev_priv, IP_VER(12, 0), IP_VER(13, 0)))
intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0,
DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR);
/* Wa_14011503030:xelpd */
- if (DISPLAY_VER(dev_priv) >= 13)
+ if (DISPLAY_VER(dev_priv) == 13)
intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 1422c2370269..3b2482bf683f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -1227,6 +1227,10 @@ intel_dp_mode_valid(struct drm_connector *_connector,
enum drm_mode_status status;
bool dsc = false, bigjoiner = false;
+ status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
+ if (status != MODE_OK)
+ return status;
+
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
return MODE_H_ILLEGAL;
@@ -1886,7 +1890,7 @@ static int dsc_src_max_compressed_bpp(struct intel_dp *intel_dp)
* Max Compressed bpp for Gen 13+ is 27bpp.
* For earlier platform is 23bpp. (Bspec:49259).
*/
- if (DISPLAY_VER(i915) <= 12)
+ if (DISPLAY_VER(i915) < 13)
return 23;
else
return 27;
@@ -2844,19 +2848,12 @@ intel_dp_audio_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct drm_connector *connector = conn_state->connector;
-
pipe_config->has_audio =
intel_dp_has_audio(encoder, pipe_config, conn_state) &&
intel_audio_compute_config(encoder, pipe_config, conn_state);
pipe_config->sdp_split_enable = pipe_config->has_audio &&
intel_dp_is_uhbr(pipe_config);
-
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] SDP split enable: %s\n",
- connector->base.id, connector->name,
- str_yes_no(pipe_config->sdp_split_enable));
}
int
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 63364c9602ef..e8940acea8ad 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -54,7 +54,7 @@ static int intel_dp_mst_check_constraints(struct drm_i915_private *i915, int bpp
struct intel_crtc_state *crtc_state,
bool dsc)
{
- if (intel_dp_is_uhbr(crtc_state) && DISPLAY_VER(i915) <= 13 && dsc) {
+ if (intel_dp_is_uhbr(crtc_state) && DISPLAY_VER(i915) < 14 && dsc) {
int output_bpp = bpp;
/* DisplayPort 2 128b/132b, bits per lane is always 32 */
int symbol_clock = crtc_state->port_clock / 32;
@@ -1282,6 +1282,10 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
return 0;
}
+ *status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
+ if (*status != MODE_OK)
+ return 0;
+
if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
*status = MODE_NO_DBLESCAN;
return 0;
@@ -1328,6 +1332,10 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) {
bigjoiner = true;
max_dotclk *= 2;
+
+ /* TODO: add support for bigjoiner */
+ *status = MODE_CLOCK_HIGH;
+ return 0;
}
if (DISPLAY_VER(dev_priv) >= 10 &&
@@ -1362,11 +1370,15 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
* Big joiner configuration needs DSC for TGL which is not true for
* XE_LPD where uncompressed joiner is supported.
*/
- if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc)
- return MODE_CLOCK_HIGH;
+ if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc) {
+ *status = MODE_CLOCK_HIGH;
+ return 0;
+ }
- if (mode_rate > max_rate && !dsc)
- return MODE_CLOCK_HIGH;
+ if (mode_rate > max_rate && !dsc) {
+ *status = MODE_CLOCK_HIGH;
+ return 0;
+ }
*status = intel_mode_valid_max_plane_size(dev_priv, mode, false);
return 0;
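The conversions above all follow the .mode_valid_ctx contract: the return value is an errno for the check itself (0 on success, -EDEADLK when a ww lock must be backed off and retried), and the mode verdict travels through *status. Returning a MODE_* constant directly, as the old code did, would be misread by the caller as an error code. A hedged sketch of the convention (limit() is a hypothetical helper):

static int demo_mode_valid_ctx(struct drm_connector *connector,
			       struct drm_display_mode *mode,
			       struct drm_modeset_acquire_ctx *ctx,
			       enum drm_mode_status *status)
{
	if (mode->clock > limit(connector)) {
		*status = MODE_CLOCK_HIGH;
		return 0;	/* the check itself succeeded */
	}

	*status = MODE_OK;
	return 0;
}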
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 7958d0bd851e..ef57dad1a9cb 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -4537,7 +4537,7 @@ void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
pipe_name(crtc->pipe), pll->active_mask);
I915_STATE_WARN(i915, pll->state.pipe_mask & pipe_mask,
- "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
+ "pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
pipe_name(crtc->pipe), pll->state.pipe_mask);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 9598d50f68f2..482c28b5c2de 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -341,7 +341,7 @@ static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state)
}
static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
- unsigned int dewake_scanline)
+ int dewake_scanline)
{
struct intel_crtc *crtc = dsb->crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 275d0218394c..a5d7fc8418c9 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -922,7 +922,7 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
gpiod_add_lookup_table(gpiod_lookup_table);
if (want_panel_gpio) {
- intel_dsi->gpio_panel = gpiod_get(dev->dev, "panel", flags);
+ intel_dsi->gpio_panel = devm_gpiod_get(dev->dev, "panel", flags);
if (IS_ERR(intel_dsi->gpio_panel)) {
drm_err(&dev_priv->drm,
"Failed to own gpio for panel control\n");
@@ -932,7 +932,7 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
if (want_backlight_gpio) {
intel_dsi->gpio_backlight =
- gpiod_get(dev->dev, "backlight", flags);
+ devm_gpiod_get(dev->dev, "backlight", flags);
if (IS_ERR(intel_dsi->gpio_backlight)) {
drm_err(&dev_priv->drm,
"Failed to own gpio for backlight control\n");
@@ -943,16 +943,3 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
if (gpiod_lookup_table)
gpiod_remove_lookup_table(gpiod_lookup_table);
}
-
-void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi)
-{
- if (intel_dsi->gpio_panel) {
- gpiod_put(intel_dsi->gpio_panel);
- intel_dsi->gpio_panel = NULL;
- }
-
- if (intel_dsi->gpio_backlight) {
- gpiod_put(intel_dsi->gpio_backlight);
- intel_dsi->gpio_backlight = NULL;
- }
-}
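Switching to the device-managed getter is what makes the whole cleanup function above removable: devres releases the descriptor when the device is unbound, so there is no gpiod_put() to forget. A minimal sketch of the pattern (flags abbreviated; the driver derives them from VBT state):

#include <linux/gpio/consumer.h>

struct gpio_desc *panel;

panel = devm_gpiod_get(dev, "panel", GPIOD_OUT_HIGH);
if (IS_ERR(panel))
	return PTR_ERR(panel);

/* no explicit gpiod_put(): devres drops the reference at unbind */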
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.h b/drivers/gpu/drm/i915/display/intel_dsi_vbt.h
index 468d873fab1a..3462fcc760e6 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.h
@@ -13,7 +13,6 @@ struct intel_dsi;
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on);
-void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi);
void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id);
void intel_dsi_log_params(struct intel_dsi *intel_dsi);
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 55d6743374bd..9111e9d46486 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -217,11 +217,17 @@ intel_dvo_mode_valid(struct drm_connector *_connector,
struct drm_display_mode *mode)
{
struct intel_connector *connector = to_intel_connector(_connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
const struct drm_display_mode *fixed_mode =
intel_panel_fixed_mode(connector, mode);
int max_dotclk = to_i915(connector->base.dev)->max_dotclk_freq;
int target_clock = mode->clock;
+ enum drm_mode_status status;
+
+ status = intel_cpu_transcoder_mode_valid(i915, mode);
+ if (status != MODE_OK)
+ return status;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 6d48aa3af95a..69c3cfe3120e 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -4,7 +4,6 @@
*/
#include <drm/drm_blend.h>
-#include <drm/drm_framebuffer.h>
#include <drm/drm_modeset_helper.h>
#include <linux/dma-fence.h>
@@ -15,6 +14,7 @@
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
+#include "intel_fb_bo.h"
#include "intel_frontbuffer.h"
#define check_array_bounds(i915, a, i) drm_WARN_ON(&(i915)->drm, (i) >= ARRAY_SIZE(a))
@@ -301,6 +301,33 @@ lookup_format_info(const struct drm_format_info formats[],
return NULL;
}
+unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
+{
+ const struct intel_modifier_desc *md;
+ u8 tiling_caps;
+
+ md = lookup_modifier_or_null(fb_modifier);
+ if (!md)
+ return I915_TILING_NONE;
+
+ tiling_caps = md->plane_caps & INTEL_PLANE_CAP_TILING_MASK;
+
+ switch (tiling_caps) {
+ case INTEL_PLANE_CAP_TILING_Y:
+ return I915_TILING_Y;
+ case INTEL_PLANE_CAP_TILING_X:
+ return I915_TILING_X;
+ case INTEL_PLANE_CAP_TILING_4:
+ case INTEL_PLANE_CAP_TILING_Yf:
+ case INTEL_PLANE_CAP_TILING_NONE:
+ return I915_TILING_NONE;
+ default:
+ MISSING_CASE(tiling_caps);
+ return I915_TILING_NONE;
+ }
+}
+
/**
* intel_fb_get_format_info: Get a modifier specific format information
* @cmd: FB add command structure
@@ -737,26 +764,6 @@ intel_fb_align_height(const struct drm_framebuffer *fb,
return ALIGN(height, tile_height);
}
-static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
-{
- u8 tiling_caps = lookup_modifier(fb_modifier)->plane_caps &
- INTEL_PLANE_CAP_TILING_MASK;
-
- switch (tiling_caps) {
- case INTEL_PLANE_CAP_TILING_Y:
- return I915_TILING_Y;
- case INTEL_PLANE_CAP_TILING_X:
- return I915_TILING_X;
- case INTEL_PLANE_CAP_TILING_4:
- case INTEL_PLANE_CAP_TILING_Yf:
- case INTEL_PLANE_CAP_TILING_NONE:
- return I915_TILING_NONE;
- default:
- MISSING_CASE(tiling_caps);
- return I915_TILING_NONE;
- }
-}
-
bool intel_fb_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier)
{
return HAS_DPT(i915) && modifier != DRM_FORMAT_MOD_LINEAR;
@@ -1374,7 +1381,8 @@ plane_view_scanout_stride(const struct intel_framebuffer *fb, int color_plane,
struct drm_i915_private *i915 = to_i915(fb->base.dev);
unsigned int stride_tiles;
- if (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)
+ if ((IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) &&
+ src_stride_tiles < dst_stride_tiles)
stride_tiles = src_stride_tiles;
else
stride_tiles = dst_stride_tiles;
@@ -1657,10 +1665,10 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
max_size = max(max_size, offset + size);
}
- if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
+ if (mul_u32_u32(max_size, tile_size) > intel_bo_to_drm_bo(obj)->size) {
drm_dbg_kms(&i915->drm,
"fb too big for bo (need %llu bytes, have %zu bytes)\n",
- mul_u32_u32(max_size, tile_size), obj->base.size);
+ mul_u32_u32(max_size, tile_size), intel_bo_to_drm_bo(obj)->size);
return -EINVAL;
}
@@ -1881,6 +1889,8 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
intel_frontbuffer_put(intel_fb->frontbuffer);
+ intel_fb_bo_framebuffer_fini(intel_fb_obj(fb));
+
kfree(intel_fb);
}
@@ -1889,7 +1899,7 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
unsigned int *handle)
{
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct drm_i915_private *i915 = to_i915(intel_bo_to_drm_bo(obj)->dev);
if (i915_gem_object_is_userptr(obj)) {
drm_dbg(&i915->drm,
@@ -1897,7 +1907,7 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
return -EINVAL;
}
- return drm_gem_handle_create(file, &obj->base, handle);
+ return drm_gem_handle_create(file, intel_bo_to_drm_bo(obj), handle);
}
struct frontbuffer_fence_cb {
@@ -1975,61 +1985,30 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
struct drm_i915_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_bo_to_drm_bo(obj)->dev);
struct drm_framebuffer *fb = &intel_fb->base;
u32 max_stride;
- unsigned int tiling, stride;
int ret = -EINVAL;
int i;
- intel_fb->frontbuffer = intel_frontbuffer_get(obj);
- if (!intel_fb->frontbuffer)
- return -ENOMEM;
-
- i915_gem_object_lock(obj, NULL);
- tiling = i915_gem_object_get_tiling(obj);
- stride = i915_gem_object_get_stride(obj);
- i915_gem_object_unlock(obj);
+ ret = intel_fb_bo_framebuffer_init(intel_fb, obj, mode_cmd);
+ if (ret)
+ return ret;
- if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
- /*
- * If there's a fence, enforce that
- * the fb modifier and tiling mode match.
- */
- if (tiling != I915_TILING_NONE &&
- tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- drm_dbg_kms(&dev_priv->drm,
- "tiling_mode doesn't match fb modifier\n");
- goto err;
- }
- } else {
- if (tiling == I915_TILING_X) {
- mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
- } else if (tiling == I915_TILING_Y) {
- drm_dbg_kms(&dev_priv->drm,
- "No Y tiling for legacy addfb\n");
- goto err;
- }
+ intel_fb->frontbuffer = intel_frontbuffer_get(obj);
+ if (!intel_fb->frontbuffer) {
+ ret = -ENOMEM;
+ goto err;
}
+ ret = -EINVAL;
if (!drm_any_plane_has_format(&dev_priv->drm,
mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
drm_dbg_kms(&dev_priv->drm,
"unsupported pixel format %p4cc / modifier 0x%llx\n",
&mode_cmd->pixel_format, mode_cmd->modifier[0]);
- goto err;
- }
-
- /*
- * gen2/3 display engine uses the fence if present,
- * so the tiling mode must match the fb modifier exactly.
- */
- if (DISPLAY_VER(dev_priv) < 4 &&
- tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- drm_dbg_kms(&dev_priv->drm,
- "tiling_mode must match fb modifier exactly on gen2/3\n");
- goto err;
+ goto err_frontbuffer_put;
}
max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
@@ -2040,18 +2019,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
"tiled" : "linear",
mode_cmd->pitches[0], max_stride);
- goto err;
- }
-
- /*
- * If there's a fence, enforce that
- * the fb pitch and fence stride match.
- */
- if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
- drm_dbg_kms(&dev_priv->drm,
- "pitch (%d) must match tiling stride (%d)\n",
- mode_cmd->pitches[0], stride);
- goto err;
+ goto err_frontbuffer_put;
}
/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
@@ -2059,7 +2027,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
drm_dbg_kms(&dev_priv->drm,
"plane 0 offset (0x%08x) must be 0\n",
mode_cmd->offsets[0]);
- goto err;
+ goto err_frontbuffer_put;
}
drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
@@ -2070,7 +2038,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
i);
- goto err;
+ goto err_frontbuffer_put;
}
stride_alignment = intel_fb_stride_alignment(fb, i);
@@ -2078,7 +2046,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
drm_dbg_kms(&dev_priv->drm,
"plane %d pitch (%d) must be at least %u byte aligned\n",
i, fb->pitches[i], stride_alignment);
- goto err;
+ goto err_frontbuffer_put;
}
if (intel_fb_is_gen12_ccs_aux_plane(fb, i)) {
@@ -2089,7 +2057,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
"ccs aux plane %d pitch (%d) must be %d\n",
i,
fb->pitches[i], ccs_aux_stride);
- goto err;
+ goto err_frontbuffer_put;
}
}
@@ -2098,7 +2066,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
ret = intel_fill_fb_info(dev_priv, intel_fb);
if (ret)
- goto err;
+ goto err_frontbuffer_put;
if (intel_fb_uses_dpt(fb)) {
struct i915_address_space *vm;
@@ -2107,7 +2075,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
if (IS_ERR(vm)) {
drm_dbg_kms(&dev_priv->drm, "failed to create DPT\n");
ret = PTR_ERR(vm);
- goto err;
+ goto err_frontbuffer_put;
}
intel_fb->dpt_vm = vm;
@@ -2124,8 +2092,10 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
err_free_dpt:
if (intel_fb_uses_dpt(fb))
intel_dpt_destroy(intel_fb->dpt_vm);
-err:
+err_frontbuffer_put:
intel_frontbuffer_put(intel_fb->frontbuffer);
+err:
+ intel_fb_bo_framebuffer_fini(obj);
return ret;
}
@@ -2137,23 +2107,14 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
- struct drm_i915_private *i915;
-
- obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
- if (!obj)
- return ERR_PTR(-ENOENT);
-
- /* object is backed with LMEM for discrete */
- i915 = to_i915(obj->base.dev);
- if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM_0)) {
- /* object is "remote", not in local memory */
- i915_gem_object_put(obj);
- drm_dbg_kms(&i915->drm, "framebuffer must reside in local memory\n");
- return ERR_PTR(-EREMOTE);
- }
+ struct drm_i915_private *i915 = to_i915(dev);
+
+ obj = intel_fb_bo_lookup_valid_bo(i915, filp, &mode_cmd);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
fb = intel_framebuffer_create(obj, &mode_cmd);
- i915_gem_object_put(obj);
+ drm_gem_object_put(intel_bo_to_drm_bo(obj));
return fb;
}
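The relabeled error paths in intel_framebuffer_init() follow the usual kernel unwind discipline: each label is named for the first resource it releases, and a jump to it undoes everything acquired before the failure, in reverse order. A hypothetical skeleton (the acquire/release names are stand-ins for the bo-init, frontbuffer, and DPT steps above):

static int demo_init(void)
{
	int ret;

	ret = acquire_a();	/* cf. intel_fb_bo_framebuffer_init() */
	if (ret)
		return ret;

	ret = acquire_b();	/* cf. intel_frontbuffer_get() */
	if (ret)
		goto err_release_a;

	ret = acquire_c();	/* cf. intel_dpt_create() */
	if (ret)
		goto err_release_b;

	return 0;

err_release_b:
	release_b();
err_release_a:
	release_a();		/* cf. intel_fb_bo_framebuffer_fini() */
	return ret;
}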
diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h
index e85167d6bc34..23db6628f53e 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fb.h
@@ -95,4 +95,6 @@ intel_user_framebuffer_create(struct drm_device *dev,
bool intel_fb_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier);
bool intel_fb_uses_dpt(const struct drm_framebuffer *fb);
+unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier);
+
#endif /* __INTEL_FB_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.c b/drivers/gpu/drm/i915/display/intel_fb_bo.c
new file mode 100644
index 000000000000..4be09541e509
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fb_bo.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <drm/drm_framebuffer.h>
+
+#include "gem/i915_gem_object.h"
+
+#include "i915_drv.h"
+#include "intel_fb.h"
+#include "intel_fb_bo.h"
+
+void intel_fb_bo_framebuffer_fini(struct drm_i915_gem_object *obj)
+{
+ /* Nothing to do for i915 */
+}
+
+int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
+ struct drm_i915_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ unsigned int tiling, stride;
+
+ i915_gem_object_lock(obj, NULL);
+ tiling = i915_gem_object_get_tiling(obj);
+ stride = i915_gem_object_get_stride(obj);
+ i915_gem_object_unlock(obj);
+
+ if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
+ /*
+ * If there's a fence, enforce that
+ * the fb modifier and tiling mode match.
+ */
+ if (tiling != I915_TILING_NONE &&
+ tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
+ drm_dbg_kms(&i915->drm,
+ "tiling_mode doesn't match fb modifier\n");
+ return -EINVAL;
+ }
+ } else {
+ if (tiling == I915_TILING_X) {
+ mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ } else if (tiling == I915_TILING_Y) {
+ drm_dbg_kms(&i915->drm,
+ "No Y tiling for legacy addfb\n");
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * gen2/3 display engine uses the fence if present,
+ * so the tiling mode must match the fb modifier exactly.
+ */
+ if (DISPLAY_VER(i915) < 4 &&
+ tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
+ drm_dbg_kms(&i915->drm,
+ "tiling_mode must match fb modifier exactly on gen2/3\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If there's a fence, enforce that
+ * the fb pitch and fence stride match.
+ */
+ if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
+ drm_dbg_kms(&i915->drm,
+ "pitch (%d) must match tiling stride (%d)\n",
+ mode_cmd->pitches[0], stride);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct drm_i915_gem_object *
+intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_lookup(filp, mode_cmd->handles[0]);
+ if (!obj)
+ return ERR_PTR(-ENOENT);
+
+ /* object is backed with LMEM for discrete */
+ if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM_0)) {
+ /* object is "remote", not in local memory */
+ i915_gem_object_put(obj);
+ drm_dbg_kms(&i915->drm, "framebuffer must reside in local memory\n");
+ return ERR_PTR(-EREMOTE);
+ }
+
+ return obj;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.h b/drivers/gpu/drm/i915/display/intel_fb_bo.h
new file mode 100644
index 000000000000..232bf898b013
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fb_bo.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_FB_BO_H__
+#define __INTEL_FB_BO_H__
+
+struct drm_file;
+struct drm_mode_fb_cmd2;
+struct drm_i915_gem_object;
+struct drm_i915_private;
+struct intel_framebuffer;
+
+void intel_fb_bo_framebuffer_fini(struct drm_i915_gem_object *obj);
+
+int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
+ struct drm_i915_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd);
+
+struct drm_i915_gem_object *
+intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *user_mode_cmd);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 63f389a1707d..f17a1afb4929 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -1235,7 +1235,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
* Recommendation is to keep this combination disabled
* Bspec: 50422 HSD: 14010260002
*/
- if (DISPLAY_VER(i915) >= 12 && crtc_state->has_psr2) {
+ if (IS_DISPLAY_VER(i915, 12, 14) && crtc_state->has_psr2) {
plane_state->no_fbc_reason = "PSR2 enabled";
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 31d0d695d567..99894a855ef0 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -43,7 +43,6 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_mman.h"
#include "i915_drv.h"
@@ -51,6 +50,7 @@
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_fbdev.h"
+#include "intel_fbdev_fb.h"
#include "intel_frontbuffer.h"
struct intel_fbdev {
@@ -146,65 +146,6 @@ static const struct fb_ops intelfb_ops = {
.fb_mmap = intel_fbdev_mmap,
};
-static int intelfb_alloc(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct intel_fbdev *ifbdev = to_intel_fbdev(helper);
- struct drm_framebuffer *fb;
- struct drm_device *dev = helper->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_mode_fb_cmd2 mode_cmd = {};
- struct drm_i915_gem_object *obj;
- int size;
-
- /* we don't do packed 24bpp */
- if (sizes->surface_bpp == 24)
- sizes->surface_bpp = 32;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
-
- mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
- DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
-
- size = mode_cmd.pitches[0] * mode_cmd.height;
- size = PAGE_ALIGN(size);
-
- obj = ERR_PTR(-ENODEV);
- if (HAS_LMEM(dev_priv)) {
- obj = i915_gem_object_create_lmem(dev_priv, size,
- I915_BO_ALLOC_CONTIGUOUS |
- I915_BO_ALLOC_USER);
- } else {
- /*
- * If the FB is too big, just don't use it since fbdev is not very
- * important and we should probably use that space with FBC or other
- * features.
- *
- * Also skip stolen on MTL as Wa_22018444074 mitigation.
- */
- if (!(IS_METEORLAKE(dev_priv)) && size * 2 < dev_priv->dsm.usable_size)
- obj = i915_gem_object_create_stolen(dev_priv, size);
- if (IS_ERR(obj))
- obj = i915_gem_object_create_shmem(dev_priv, size);
- }
-
- if (IS_ERR(obj)) {
- drm_err(&dev_priv->drm, "failed to allocate framebuffer (%pe)\n", obj);
- return PTR_ERR(obj);
- }
-
- fb = intel_framebuffer_create(obj, &mode_cmd);
- i915_gem_object_put(obj);
- if (IS_ERR(fb))
- return PTR_ERR(fb);
-
- ifbdev->fb = to_intel_framebuffer(fb);
- return 0;
-}
-
static int intelfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
@@ -213,7 +154,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
- struct i915_ggtt *ggtt = to_gt(dev_priv)->ggtt;
const struct i915_gtt_view view = {
.type = I915_GTT_VIEW_NORMAL,
};
@@ -222,9 +162,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct i915_vma *vma;
unsigned long flags = 0;
bool prealloc = false;
- void __iomem *vaddr;
struct drm_i915_gem_object *obj;
- struct i915_gem_ww_ctx ww;
int ret;
mutex_lock(&ifbdev->hpd_lock);
@@ -245,12 +183,13 @@ static int intelfb_create(struct drm_fb_helper *helper,
intel_fb = ifbdev->fb = NULL;
}
if (!intel_fb || drm_WARN_ON(dev, !intel_fb_obj(&intel_fb->base))) {
+ struct drm_framebuffer *fb;
drm_dbg_kms(&dev_priv->drm,
"no BIOS fb, allocating a new one\n");
- ret = intelfb_alloc(helper, sizes);
- if (ret)
- return ret;
- intel_fb = ifbdev->fb;
+ fb = intel_fbdev_fb_alloc(helper, sizes);
+ if (IS_ERR(fb))
+ return PTR_ERR(fb);
+ intel_fb = ifbdev->fb = to_intel_framebuffer(fb);
} else {
drm_dbg_kms(&dev_priv->drm, "re-using BIOS fb\n");
prealloc = true;
@@ -283,49 +222,18 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->fbops = &intelfb_ops;
obj = intel_fb_obj(&intel_fb->base);
- if (i915_gem_object_is_lmem(obj)) {
- struct intel_memory_region *mem = obj->mm.region;
-
- /* Use fbdev's framebuffer from lmem for discrete */
- info->fix.smem_start =
- (unsigned long)(mem->io_start +
- i915_gem_object_get_dma_address(obj, 0));
- info->fix.smem_len = obj->base.size;
- } else {
- /* Our framebuffer is the entirety of fbdev's system memory */
- info->fix.smem_start =
- (unsigned long)(ggtt->gmadr.start + i915_ggtt_offset(vma));
- info->fix.smem_len = vma->size;
- }
-
- for_i915_gem_ww(&ww, ret, false) {
- ret = i915_gem_object_lock(vma->obj, &ww);
-
- if (ret)
- continue;
-
- vaddr = i915_vma_pin_iomap(vma);
- if (IS_ERR(vaddr)) {
- drm_err(&dev_priv->drm,
- "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
- ret = PTR_ERR(vaddr);
- continue;
- }
- }
+ ret = intel_fbdev_fb_fill_info(dev_priv, info, obj, vma);
if (ret)
goto out_unpin;
- info->screen_base = vaddr;
- info->screen_size = vma->size;
-
drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
/* If the object is shmemfs backed, it will have given us zeroed pages.
* If the object is stolen however, it will be full of whatever
* garbage was left in there.
*/
- if (!i915_gem_object_is_shmem(vma->obj) && !prealloc)
+ if (!i915_gem_object_is_shmem(obj) && !prealloc)
memset_io(info->screen_base, 0, info->screen_size);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -424,12 +332,12 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
continue;
}
- if (obj->base.size > max_size) {
+ if (intel_bo_to_drm_bo(obj)->size > max_size) {
drm_dbg_kms(&i915->drm,
"found possible fb from [PLANE:%d:%s]\n",
plane->base.base.id, plane->base.name);
fb = to_intel_framebuffer(plane_state->uapi.fb);
- max_size = obj->base.size;
+ max_size = intel_bo_to_drm_bo(obj)->size;
}
}
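One detail survives the refactor unchanged: the framebuffer mapping handed back through info->screen_base is __iomem, so the pre-clear of non-shmem (e.g. stolen) backing has to use memset_io(); a plain memset() on an I/O mapping is not permitted. Sketch:

void __iomem *screen = info->screen_base;

/* stolen memory keeps whatever was left in it; io-safe clear */
memset_io(screen, 0, info->screen_size);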
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
new file mode 100644
index 000000000000..717c3a3237c4
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_fb_helper.h>
+
+#include "gem/i915_gem_lmem.h"
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+#include "intel_fbdev_fb.h"
+
+struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_framebuffer *fb;
+ struct drm_device *dev = helper->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_mode_fb_cmd2 mode_cmd = {};
+ struct drm_i915_gem_object *obj;
+ int size;
+
+ /* we don't do packed 24bpp */
+ if (sizes->surface_bpp == 24)
+ sizes->surface_bpp = 32;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
+ DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = PAGE_ALIGN(size);
+
+ obj = ERR_PTR(-ENODEV);
+ if (HAS_LMEM(dev_priv)) {
+ obj = i915_gem_object_create_lmem(dev_priv, size,
+ I915_BO_ALLOC_CONTIGUOUS |
+ I915_BO_ALLOC_USER);
+ } else {
+ /*
+ * If the FB is too big, just don't use it since fbdev is not very
+ * important and we should probably use that space with FBC or other
+ * features.
+ *
+ * Also skip stolen on MTL as Wa_22018444074 mitigation.
+ */
+ if (!IS_METEORLAKE(dev_priv) && size * 2 < dev_priv->dsm.usable_size)
+ obj = i915_gem_object_create_stolen(dev_priv, size);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_shmem(dev_priv, size);
+ }
+
+ if (IS_ERR(obj)) {
+ drm_err(&dev_priv->drm, "failed to allocate framebuffer (%pe)\n", obj);
+ return ERR_CAST(obj);
+ }
+
+ fb = intel_framebuffer_create(obj, &mode_cmd);
+ i915_gem_object_put(obj);
+
+ return fb;
+}
+
+int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
+ struct drm_i915_gem_object *obj, struct i915_vma *vma)
+{
+ struct i915_gem_ww_ctx ww;
+ void __iomem *vaddr;
+ int ret;
+
+ if (i915_gem_object_is_lmem(obj)) {
+ struct intel_memory_region *mem = obj->mm.region;
+
+ /* Use fbdev's framebuffer from lmem for discrete */
+ info->fix.smem_start =
+ (unsigned long)(mem->io_start +
+ i915_gem_object_get_dma_address(obj, 0));
+ info->fix.smem_len = obj->base.size;
+ } else {
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
+
+ /* Our framebuffer is the entirety of fbdev's system memory */
+ info->fix.smem_start =
+ (unsigned long)(ggtt->gmadr.start + i915_ggtt_offset(vma));
+ info->fix.smem_len = vma->size;
+ }
+
+ for_i915_gem_ww(&ww, ret, false) {
+ ret = i915_gem_object_lock(vma->obj, &ww);
+
+ if (ret)
+ continue;
+
+ vaddr = i915_vma_pin_iomap(vma);
+ if (IS_ERR(vaddr)) {
+ drm_err(&i915->drm,
+ "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
+ ret = PTR_ERR(vaddr);
+ continue;
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ info->screen_base = vaddr;
+ info->screen_size = intel_bo_to_drm_bo(obj)->size;
+
+ return 0;
+}
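The for_i915_gem_ww() loop above packages the ww-mutex backoff protocol. A rough open-coded equivalent, with do_work() as a stand-in for the pin_iomap step: -EDEADLK from a contended lock drops the held locks and retries the whole body, any other result exits the loop.

struct i915_gem_ww_ctx ww;
int ret;

i915_gem_ww_ctx_init(&ww, false);
retry:
ret = i915_gem_object_lock(obj, &ww);
if (!ret)
	ret = do_work(obj);
if (ret == -EDEADLK) {
	ret = i915_gem_ww_ctx_backoff(&ww);
	if (!ret)
		goto retry;
}
i915_gem_ww_ctx_fini(&ww);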
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
new file mode 100644
index 000000000000..a395b2c65d33
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_FBDEV_FB_H__
+#define __INTEL_FBDEV_FB_H__
+
+struct drm_fb_helper;
+struct drm_fb_helper_surface_size;
+struct drm_i915_gem_object;
+struct drm_i915_private;
+struct fb_info;
+struct i915_vma;
+
+struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes);
+int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
+ struct drm_i915_gem_object *obj, struct i915_vma *vma);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index ab18cfc19c0a..39e4f5f7c817 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -1983,6 +1983,10 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
bool ycbcr_420_only;
enum intel_output_format sink_format;
+ status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
+ if (status != MODE_OK)
+ return status;
+
if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
clock *= 2;
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 4b114fde57b1..221f5c6c871b 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -185,7 +185,7 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
/* Convert from 100ms to 100us units */
pps->t4 = val * 1000;
- if (DISPLAY_VER(dev_priv) <= 4 &&
+ if (DISPLAY_VER(dev_priv) < 5 &&
pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) {
drm_dbg_kms(&dev_priv->drm,
"Panel power timings uninitialized, "
@@ -389,11 +389,16 @@ intel_lvds_mode_valid(struct drm_connector *_connector,
struct drm_display_mode *mode)
{
struct intel_connector *connector = to_intel_connector(_connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *fixed_mode =
intel_panel_fixed_mode(connector, mode);
int max_pixclk = to_i915(connector->base.dev)->max_dotclk_freq;
enum drm_mode_status status;
+ status = intel_cpu_transcoder_mode_valid(i915, mode);
+ if (status != MODE_OK)
+ return status;
+
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 15c1804dcd59..6029bb71276c 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -806,10 +806,10 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
- if (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))
+ if (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))
val |= EDP_SU_TRACK_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
+ if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) < 13)
val |= EDP_Y_COORDINATE_ENABLE;
val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
@@ -1094,7 +1094,7 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
return true;
/* Not supported <13 / Wa_22012279113:adl-p */
- if (DISPLAY_VER(dev_priv) <= 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
+ if (DISPLAY_VER(dev_priv) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
return false;
crtc_state->req_psr2_sdp_prior_scanline = true;
@@ -1221,7 +1221,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
* over PSR2.
*/
if (crtc_state->dsc.compression_enable &&
- (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))) {
+ (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 cannot be enabled since DSC is enabled\n");
return false;
@@ -1917,81 +1917,6 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
}
-void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum pipe pipe = plane->pipe;
-
- if (!crtc_state->enable_psr2_sel_fetch)
- return;
-
- intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
-}
-
-void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
- enum pipe pipe = plane->pipe;
-
- if (!crtc_state->enable_psr2_sel_fetch)
- return;
-
- if (plane->id == PLANE_CURSOR)
- intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
- plane_state->ctl);
- else
- intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
- PLANE_SEL_FETCH_CTL_ENABLE);
-}
-
-void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int color_plane)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum pipe pipe = plane->pipe;
- const struct drm_rect *clip;
- u32 val;
- int x, y;
-
- if (!crtc_state->enable_psr2_sel_fetch)
- return;
-
- if (plane->id == PLANE_CURSOR)
- return;
-
- clip = &plane_state->psr2_sel_fetch_area;
-
- val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
- val |= plane_state->uapi.dst.x1;
- intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
-
- x = plane_state->view.color_plane[color_plane].x;
-
- /*
- * From Bspec: UV surface Start Y Position = half of Y plane Y
- * start position.
- */
- if (!color_plane)
- y = plane_state->view.color_plane[color_plane].y + clip->y1;
- else
- y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;
-
- val = y << 16 | x;
-
- intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
- val);
-
- /* Sizes are 0 based */
- val = (drm_rect_height(clip) - 1) << 16;
- val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
- intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
-}
-
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
@@ -2251,8 +2176,19 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
continue;
inter = pipe_clip;
- if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
+ sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
+ if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
+ sel_fetch_area->y1 = -1;
+ sel_fetch_area->y2 = -1;
+ /*
+ * If plane selective fetch was previously enabled, disable it.
+ */
+ if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
+ crtc_state->update_planes |= BIT(plane->id);
+
continue;
+ }
if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
full_update = true;
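The degenerate rect doubles as the disable request: with y1 = y2 = -1 the area has zero height, and flagging the plane in update_planes guarantees the arm path still runs for it. On the arm side (moved into skl_universal_plane.c later in this diff) the decision then reduces to:

if (drm_rect_height(&plane_state->psr2_sel_fetch_area) > 0)
	intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
			  PLANE_SEL_FETCH_CTL_ENABLE);
else
	icl_plane_disable_sel_fetch_arm(plane, crtc_state);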
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 6a1f4573852b..143e0595c097 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -55,16 +55,6 @@ bool intel_psr_enabled(struct intel_dp *intel_dp);
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state);
-void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int color_plane);
-void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
-
-void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state);
void intel_psr_pause(struct intel_dp *intel_dp);
void intel_psr_resume(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index bcb4959df70d..9218047495fb 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -1931,13 +1931,19 @@ static enum drm_mode_status
intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(connector);
- int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
bool has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo_connector, connector->state);
+ int max_dotclk = i915->max_dotclk_freq;
+ enum drm_mode_status status;
int clock = mode->clock;
+ status = intel_cpu_transcoder_mode_valid(i915, mode);
+ if (status != MODE_OK)
+ return status;
+
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index ce5a73a4cc89..bc61e736f9b3 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -3,7 +3,7 @@
* Copyright © 2019 Intel Corporation
*/
-#include <linux/util_macros.h>
+#include <linux/math.h>
#include "i915_reg.h"
#include "intel_ddi.h"
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 31a79fdfc812..2ee4f0d95851 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -958,8 +958,14 @@ static enum drm_mode_status
intel_tv_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
- int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ int max_dotclk = i915->max_dotclk_freq;
+ enum drm_mode_status status;
+
+ status = intel_cpu_transcoder_mode_valid(i915, mode);
+ if (status != MODE_OK)
+ return status;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index 2cec2abf9746..fe256bf7b485 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -265,6 +265,32 @@ int intel_crtc_scanline_to_hw(struct intel_crtc *crtc, int scanline)
return (scanline + vtotal - crtc->scanline_offset) % vtotal;
}
+/*
+ * The uncore version of the spin lock functions is used to decide
+ * whether we need to lock the uncore lock or not. This is only
+ * needed in i915, not in Xe.
+ *
+ * This lock in i915 is needed because some old platforms (at least
+ * IVB and possibly HSW as well), which are not supported in Xe, need
+ * all register accesses to the same cacheline to be serialized,
+ * otherwise they may hang.
+ */
+static void intel_vblank_section_enter(struct drm_i915_private *i915)
+ __acquires(i915->uncore.lock)
+{
+#ifdef I915
+ spin_lock(&i915->uncore.lock);
+#endif
+}
+
+static void intel_vblank_section_exit(struct drm_i915_private *i915)
+ __releases(i915->uncore.lock)
+{
+#ifdef I915
+ spin_unlock(&i915->uncore.lock);
+#endif
+}
+
static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
bool in_vblank_irq,
int *vpos, int *hpos,
@@ -302,11 +328,12 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
}
/*
- * Lock uncore.lock, as we will do multiple timing critical raw
- * register reads, potentially with preemption disabled, so the
- * following code must not block on uncore.lock.
+ * Enter vblank critical section, as we will do multiple
+ * timing critical raw register reads, potentially with
+ * preemption disabled, so the following code must not block.
*/
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ local_irq_save(irqflags);
+ intel_vblank_section_enter(dev_priv);
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
@@ -374,7 +401,8 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ intel_vblank_section_exit(dev_priv);
+ local_irq_restore(irqflags);
/*
* While in vblank, position will be negative
@@ -412,9 +440,13 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc)
unsigned long irqflags;
int position;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ local_irq_save(irqflags);
+ intel_vblank_section_enter(dev_priv);
+
position = __intel_get_crtc_scanline(crtc);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+ intel_vblank_section_exit(dev_priv);
+ local_irq_restore(irqflags);
return position;
}
@@ -537,7 +569,7 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
* Need to audit everything to make sure it's safe.
*/
spin_lock_irqsave(&i915->drm.vblank_time_lock, irqflags);
- spin_lock(&i915->uncore.lock);
+ intel_vblank_section_enter(i915);
drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
@@ -546,7 +578,6 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
crtc->mode_flags = mode_flags;
crtc->scanline_offset = intel_crtc_scanline_offset(crtc_state);
-
- spin_unlock(&i915->uncore.lock);
+ intel_vblank_section_exit(i915);
spin_unlock_irqrestore(&i915->drm.vblank_time_lock, irqflags);
}
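The wrappers let this file compile into both drivers while keeping the timing-critical property intact: interrupts are always off around the raw scanline reads, and only the i915 build additionally serializes on uncore.lock. The resulting shape of every call site:

local_irq_save(irqflags);
intel_vblank_section_enter(i915);	/* uncore.lock on i915, no-op on xe */

/* ... timing-critical raw register reads, must not block ... */

intel_vblank_section_exit(i915);
local_irq_restore(irqflags);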
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index f5c77a018e10..511dc1544854 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -18,6 +18,7 @@
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
#include "intel_psr.h"
+#include "intel_psr_regs.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
@@ -629,6 +630,18 @@ skl_plane_disable_arm(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
}
+static void icl_plane_disable_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
+}
+
static void
icl_plane_disable_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
@@ -642,7 +655,7 @@ icl_plane_disable_arm(struct intel_plane *plane,
skl_write_plane_wm(plane, crtc_state);
- intel_psr2_disable_plane_sel_fetch_arm(plane, crtc_state);
+ icl_plane_disable_sel_fetch_arm(plane, crtc_state);
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0);
intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
}
@@ -1197,6 +1210,48 @@ skl_plane_update_arm(struct intel_plane *plane,
skl_plane_surf(plane_state, 0));
}
+static void icl_plane_update_sel_fetch_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ const struct drm_rect *clip;
+ u32 val;
+ int x, y;
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ clip = &plane_state->psr2_sel_fetch_area;
+
+ val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
+ val |= plane_state->uapi.dst.x1;
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
+
+ x = plane_state->view.color_plane[color_plane].x;
+
+ /*
+ * From Bspec: UV surface Start Y Position = half of Y plane Y
+ * start position.
+ */
+ if (!color_plane)
+ y = plane_state->view.color_plane[color_plane].y + clip->y1;
+ else
+ y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;
+
+ val = y << 16 | x;
+
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
+ val);
+
+ /* Sizes are 0 based */
+ val = (drm_rect_height(clip) - 1) << 16;
+ val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
+}
+
static void
icl_plane_update_noarm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -1269,7 +1324,24 @@ icl_plane_update_noarm(struct intel_plane *plane,
if (plane_state->force_black)
icl_plane_csc_load_black(plane);
- intel_psr2_program_plane_sel_fetch_noarm(plane, crtc_state, plane_state, color_plane);
+ icl_plane_update_sel_fetch_noarm(plane, crtc_state, plane_state, color_plane);
+}
+
+static void icl_plane_update_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ if (drm_rect_height(&plane_state->psr2_sel_fetch_area) > 0)
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
+ PLANE_SEL_FETCH_CTL_ENABLE);
+ else
+ icl_plane_disable_sel_fetch_arm(plane, crtc_state);
}
static void
@@ -1296,7 +1368,7 @@ icl_plane_update_arm(struct intel_plane *plane,
if (plane_state->scaler_id >= 0)
skl_program_plane_scaler(plane, crtc_state, plane_state);
- intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state, plane_state);
+ icl_plane_update_sel_fetch_arm(plane, crtc_state, plane_state);
/*
* The control register self-arms if the plane was previously
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index bda49734ca33..9b33b8a74d64 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -1532,21 +1532,29 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
}
}
-static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
+static const struct drm_encoder_funcs intel_dsi_funcs = {
+ .destroy = intel_encoder_destroy,
+};
+
+static enum drm_mode_status vlv_dsi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
+ struct drm_i915_private *i915 = to_i915(connector->dev);
- intel_dsi_vbt_gpio_cleanup(intel_dsi);
- intel_encoder_destroy(encoder);
-}
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ enum drm_mode_status status;
-static const struct drm_encoder_funcs intel_dsi_funcs = {
- .destroy = intel_dsi_encoder_destroy,
-};
+ status = intel_cpu_transcoder_mode_valid(i915, mode);
+ if (status != MODE_OK)
+ return status;
+ }
+
+ return intel_dsi_mode_valid(connector, mode);
+}
static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
.get_modes = intel_dsi_get_modes,
- .mode_valid = intel_dsi_mode_valid,
+ .mode_valid = vlv_dsi_mode_valid,
.atomic_check = intel_digital_connector_atomic_check,
};
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index e38f06a6e56e..dcbfe32fd30c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -279,7 +279,8 @@ static int proto_context_set_protected(struct drm_i915_private *i915,
}
static struct i915_gem_proto_context *
-proto_context_create(struct drm_i915_private *i915, unsigned int flags)
+proto_context_create(struct drm_i915_file_private *fpriv,
+ struct drm_i915_private *i915, unsigned int flags)
{
struct i915_gem_proto_context *pc, *err;
@@ -287,6 +288,7 @@ proto_context_create(struct drm_i915_private *i915, unsigned int flags)
if (!pc)
return ERR_PTR(-ENOMEM);
+ pc->fpriv = fpriv;
pc->num_user_engines = -1;
pc->user_engines = NULL;
pc->user_flags = BIT(UCONTEXT_BANNABLE) |
@@ -1622,6 +1624,7 @@ i915_gem_create_context(struct drm_i915_private *i915,
err = PTR_ERR(ppgtt);
goto err_ctx;
}
+ ppgtt->vm.fpriv = pc->fpriv;
vm = &ppgtt->vm;
}
if (vm)
@@ -1741,7 +1744,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
/* 0 reserved for invalid/unassigned ppgtt */
xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
- pc = proto_context_create(i915, 0);
+ pc = proto_context_create(file_priv, i915, 0);
if (IS_ERR(pc)) {
err = PTR_ERR(pc);
goto err;
@@ -1823,6 +1826,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
args->vm_id = id;
+ ppgtt->vm.fpriv = file_priv;
return 0;
err_put:
@@ -2285,7 +2289,8 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
return -EIO;
}
- ext_data.pc = proto_context_create(i915, args->flags);
+ ext_data.pc = proto_context_create(file->driver_priv, i915,
+ args->flags);
if (IS_ERR(ext_data.pc))
return PTR_ERR(ext_data.pc);
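Recording the creating client in the proto-context is what later lets an address space be attributed back to a DRM file, which the fdinfo/client-accounting parts of this series rely on. The ownership chain, in sketch form:

pc = proto_context_create(file_priv, i915, 0);	/* remember the owner */
/* ... context creation resolves the proto-context ... */
ppgtt->vm.fpriv = pc->fpriv;	/* the VM inherits the owning client */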
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index cb78214a7dcd..c573c067779f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -188,6 +188,9 @@ struct i915_gem_proto_engine {
* CONTEXT_CREATE_SET_PARAM during GEM_CONTEXT_CREATE.
*/
struct i915_gem_proto_context {
+ /** @fpriv: Client that created the context */
+ struct drm_i915_file_private *fpriv;
+
/** @vm: See &i915_gem_context.vm */
struct i915_address_space *vm;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 683fd8d3151c..81a57dd52dfd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -9,6 +9,7 @@
#include <linux/sync_file.h>
#include <linux/uaccess.h>
+#include <drm/drm_auth.h>
#include <drm/drm_syncobj.h>
#include "display/intel_frontbuffer.h"
@@ -253,6 +254,8 @@ struct i915_execbuffer {
struct intel_gt *gt; /* gt for the execbuf */
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
+ intel_wakeref_t wakeref;
+ intel_wakeref_t wakeref_gt0;
/** our requests to build */
struct i915_request *requests[MAX_ENGINE_INSTANCE + 1];
@@ -1678,7 +1681,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
size = nreloc * sizeof(*relocs);
- relocs = kvmalloc_array(size, 1, GFP_KERNEL);
+ relocs = kvmalloc_array(1, size, GFP_KERNEL);
if (!relocs) {
err = -ENOMEM;
goto err;
@@ -2719,13 +2722,13 @@ eb_select_engine(struct i915_execbuffer *eb)
for_each_child(ce, child)
intel_context_get(child);
- intel_gt_pm_get(gt);
+ eb->wakeref = intel_gt_pm_get(ce->engine->gt);
/*
* Keep GT0 active on MTL so that i915_vma_parked() doesn't
* free VMAs while execbuf ioctl is validating VMAs.
*/
if (gt->info.id)
- intel_gt_pm_get(to_gt(gt->i915));
+ eb->wakeref_gt0 = intel_gt_pm_get(to_gt(gt->i915));
if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
err = intel_context_alloc_state(ce);
@@ -2765,9 +2768,9 @@ eb_select_engine(struct i915_execbuffer *eb)
err:
if (gt->info.id)
- intel_gt_pm_put(to_gt(gt->i915));
+ intel_gt_pm_put(to_gt(gt->i915), eb->wakeref_gt0);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(ce->engine->gt, eb->wakeref);
for_each_child(ce, child)
intel_context_put(child);
intel_context_put(ce);
@@ -2785,8 +2788,8 @@ eb_put_engine(struct i915_execbuffer *eb)
* i915_vma_parked() from interfering while execbuf validates vmas.
*/
if (eb->gt->info.id)
- intel_gt_pm_put(to_gt(eb->gt->i915));
- intel_gt_pm_put(eb->gt);
+ intel_gt_pm_put(to_gt(eb->gt->i915), eb->wakeref_gt0);
+ intel_gt_pm_put(eb->context->engine->gt, eb->wakeref);
for_each_child(eb->context, child)
intel_context_put(child);
intel_context_put(eb->context);
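intel_gt_pm_get() now hands back an intel_wakeref_t cookie that must be passed to the matching put, so reference tracking can pair every release with its acquisition and flag imbalances; paths that cannot carry the cookie through use the _untracked variants (see the selftest hunks below). The basic pattern:

intel_wakeref_t wakeref;

wakeref = intel_gt_pm_get(gt);
/* ... hardware access that needs the GT awake ... */
intel_gt_pm_put(gt, wakeref);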
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index c26d87555825..25eeeb863209 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -106,6 +106,10 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->mm.link);
+#ifdef CONFIG_PROC_FS
+ INIT_LIST_HEAD(&obj->client_link);
+#endif
+
INIT_LIST_HEAD(&obj->lut_list);
spin_lock_init(&obj->lut_lock);
@@ -293,6 +297,10 @@ void __i915_gem_free_object_rcu(struct rcu_head *head)
container_of(head, typeof(*obj), rcu);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ /* We need to keep this alive for RCU read access from fdinfo. */
+ if (obj->mm.n_placements > 1)
+ kfree(obj->mm.placements);
+
i915_gem_object_free(obj);
GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
@@ -389,9 +397,6 @@ void __i915_gem_free_object(struct drm_i915_gem_object *obj)
if (obj->ops->release)
obj->ops->release(obj);
- if (obj->mm.n_placements > 1)
- kfree(obj->mm.placements);
-
if (obj->shares_resv_from)
i915_vm_resv_put(obj->shares_resv_from);
@@ -442,6 +447,8 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));
+ i915_drm_client_remove_object(obj);
+
/*
* Before we free the object, make sure any pure RCU-only
* read-side critical sections are complete, e.g.
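Moving the kfree() of obj->mm.placements into the RCU callback matters because fdinfo readers added by this series walk a client's objects under rcu_read_lock(); anything they may dereference has to stay valid until the grace period ends. Schematic reader (shape assumed for illustration, not the exact fdinfo code):

rcu_read_lock();
list_for_each_entry_rcu(obj, &client->objects_list, client_link)
	size += obj->base.size;	/* may also inspect obj->mm.placements */
rcu_read_unlock();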
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 2292404007c8..0c5cdab278b6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -302,6 +302,18 @@ struct drm_i915_gem_object {
*/
struct i915_address_space *shares_resv_from;
+#ifdef CONFIG_PROC_FS
+ /**
+ * @client: @i915_drm_client which created the object
+ */
+ struct i915_drm_client *client;
+
+ /**
+ * @client_link: Link into @i915_drm_client.objects_list
+ */
+ struct list_head client_link;
+#endif
+
union {
struct rcu_head rcu;
struct llist_node freed;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 1a766d8e7cce..8c88075eeab2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -386,6 +386,27 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915,
drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);
+ /* Wa_14019821291 */
+ if (MEDIA_VER_FULL(i915) == IP_VER(13, 0)) {
+ /*
+ * This workaround is primarily implemented by the BIOS. We
+ * just need to figure out whether the BIOS has applied the
+ * workaround (meaning the programmed address falls within
+ * the DSM) and, if so, reserve that part of the DSM to
+ * prevent accidental reuse. The DSM location should be just
+ * below the WOPCM.
+ */
+ u64 gscpsmi_base = intel_uncore_read64_2x32(uncore,
+ MTL_GSCPSMI_BASEADDR_LSB,
+ MTL_GSCPSMI_BASEADDR_MSB);
+ if (gscpsmi_base >= i915->dsm.stolen.start &&
+ gscpsmi_base < i915->dsm.stolen.end) {
+ *base = gscpsmi_base;
+ *size = i915->dsm.stolen.end - gscpsmi_base;
+ return;
+ }
+ }
+
switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
case GEN8_STOLEN_RESERVED_1M:
*size = 1024 * 1024;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 3bef1beec7cb..3fd68a099a85 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -85,6 +85,7 @@ out:
static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
{
+ intel_wakeref_t wakeref;
struct i915_vma *vma;
u32 __iomem *map;
int err = 0;
@@ -99,7 +100,7 @@ static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
if (IS_ERR(vma))
return PTR_ERR(vma);
- intel_gt_pm_get(vma->vm->gt);
+ wakeref = intel_gt_pm_get(vma->vm->gt);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
@@ -112,12 +113,13 @@ static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
i915_vma_unpin_iomap(vma);
out_rpm:
- intel_gt_pm_put(vma->vm->gt);
+ intel_gt_pm_put(vma->vm->gt, wakeref);
return err;
}
static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
{
+ intel_wakeref_t wakeref;
struct i915_vma *vma;
u32 __iomem *map;
int err = 0;
@@ -132,7 +134,7 @@ static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
if (IS_ERR(vma))
return PTR_ERR(vma);
- intel_gt_pm_get(vma->vm->gt);
+ wakeref = intel_gt_pm_get(vma->vm->gt);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
@@ -145,7 +147,7 @@ static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
i915_vma_unpin_iomap(vma);
out_rpm:
- intel_gt_pm_put(vma->vm->gt);
+ intel_gt_pm_put(vma->vm->gt, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 72957a36a36b..2c51a2c452fc 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -630,14 +630,14 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
static void disable_retire_worker(struct drm_i915_private *i915)
{
i915_gem_driver_unregister__shrinker(i915);
- intel_gt_pm_get(to_gt(i915));
+ intel_gt_pm_get_untracked(to_gt(i915));
cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
}
static void restore_retire_worker(struct drm_i915_private *i915)
{
igt_flush_test(i915);
- intel_gt_pm_put(to_gt(i915));
+ intel_gt_pm_put_untracked(to_gt(i915));
i915_gem_driver_register__shrinker(i915);
}
@@ -778,6 +778,7 @@ err_obj:
static int gtt_set(struct drm_i915_gem_object *obj)
{
+ intel_wakeref_t wakeref;
struct i915_vma *vma;
void __iomem *map;
int err = 0;
@@ -786,7 +787,7 @@ static int gtt_set(struct drm_i915_gem_object *obj)
if (IS_ERR(vma))
return PTR_ERR(vma);
- intel_gt_pm_get(vma->vm->gt);
+ wakeref = intel_gt_pm_get(vma->vm->gt);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
if (IS_ERR(map)) {
@@ -798,12 +799,13 @@ static int gtt_set(struct drm_i915_gem_object *obj)
i915_vma_unpin_iomap(vma);
out:
- intel_gt_pm_put(vma->vm->gt);
+ intel_gt_pm_put(vma->vm->gt, wakeref);
return err;
}
static int gtt_check(struct drm_i915_gem_object *obj)
{
+ intel_wakeref_t wakeref;
struct i915_vma *vma;
void __iomem *map;
int err = 0;
@@ -812,7 +814,7 @@ static int gtt_check(struct drm_i915_gem_object *obj)
if (IS_ERR(vma))
return PTR_ERR(vma);
- intel_gt_pm_get(vma->vm->gt);
+ wakeref = intel_gt_pm_get(vma->vm->gt);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
if (IS_ERR(map)) {
@@ -828,7 +830,7 @@ static int gtt_check(struct drm_i915_gem_object *obj)
i915_vma_unpin_iomap(vma);
out:
- intel_gt_pm_put(vma->vm->gt);
+ intel_gt_pm_put(vma->vm->gt, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index e199d7dbb876..2b0327cc47c2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -83,7 +83,7 @@ live_context(struct drm_i915_private *i915, struct file *file)
int err;
u32 id;
- pc = proto_context_create(i915, 0);
+ pc = proto_context_create(fpriv, i915, 0);
if (IS_ERR(pc))
return ERR_CAST(pc);
@@ -152,7 +152,7 @@ kernel_context(struct drm_i915_private *i915,
struct i915_gem_context *ctx;
struct i915_gem_proto_context *pc;
- pc = proto_context_create(i915, 0);
+ pc = proto_context_create(NULL, i915, 0);
if (IS_ERR(pc))
return ERR_CAST(pc);
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 9895e18df043..fa46d2308b0e 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -5,6 +5,7 @@
#include <linux/log2.h>
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gen8_ppgtt.h"
@@ -222,6 +223,9 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ if (vm->rsvd.obj)
+ i915_gem_object_put(vm->rsvd.obj);
+
if (intel_vgpu_active(vm->i915))
gen8_ppgtt_notify_vgt(ppgtt, false);
@@ -950,6 +954,41 @@ err_pd:
return ERR_PTR(err);
}
+static int gen8_init_rsvd(struct i915_address_space *vm)
+{
+ struct drm_i915_private *i915 = vm->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int ret;
+
+ /* The memory will be used only by the GPU. */
+ obj = i915_gem_object_create_lmem(i915, PAGE_SIZE,
+ I915_BO_ALLOC_VOLATILE |
+ I915_BO_ALLOC_GPU_ONLY);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto unref;
+ }
+
+ ret = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+ if (ret)
+ goto unref;
+
+ vm->rsvd.vma = i915_vma_make_unshrinkable(vma);
+ vm->rsvd.obj = obj;
+ vm->total -= vma->node.size;
+ return 0;
+unref:
+ i915_gem_object_put(obj);
+ return ret;
+}
+
/*
* GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
* with a net effect resembling a 2-level page table in normal x86 terms. Each
@@ -1031,6 +1070,10 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
if (intel_vgpu_active(gt->i915))
gen8_ppgtt_notify_vgt(ppgtt, true);
+ err = gen8_init_rsvd(&ppgtt->vm);
+ if (err)
+ goto err_put;
+
return ppgtt;
err_put:
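The page set up by gen8_init_rsvd() above is the scratch target for the Wa_16018031267 / Wa_16018063123 per-context blit added later in this diff (xehp_emit_fastcolor_blt_wabb() reads ce->vm->rsvd.vma). A sketch of the resulting address-space shape, assuming PIN_HIGH places the vma at the top:

	/* 0 ............................. vm->total  <- visible to users
	 *                                 [rsvd page] <- GPU-only scratch,
	 *                                 subtracted from vm->total so no
	 *                                 user allocation can land on it
	 */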
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index ecc990ec1b95..d650beb8ed22 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -28,11 +28,14 @@ static void irq_disable(struct intel_breadcrumbs *b)
static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
{
+ intel_wakeref_t wakeref;
+
/*
* Since we are waiting on a request, the GPU should be busy
* and should have its own rpm reference.
*/
- if (GEM_WARN_ON(!intel_gt_pm_get_if_awake(b->irq_engine->gt)))
+ wakeref = intel_gt_pm_get_if_awake(b->irq_engine->gt);
+ if (GEM_WARN_ON(!wakeref))
return;
/*
@@ -41,7 +44,7 @@ static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
* which we can add a new waiter and avoid the cost of re-enabling
* the irq.
*/
- WRITE_ONCE(b->irq_armed, true);
+ WRITE_ONCE(b->irq_armed, wakeref);
/* Requests may have completed before we could enable the interrupt. */
if (!b->irq_enabled++ && b->irq_enable(b))
@@ -61,12 +64,14 @@ static void intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
{
+ intel_wakeref_t wakeref = b->irq_armed;
+
GEM_BUG_ON(!b->irq_enabled);
if (!--b->irq_enabled)
b->irq_disable(b);
- WRITE_ONCE(b->irq_armed, false);
- intel_gt_pm_put_async(b->irq_engine->gt);
+ WRITE_ONCE(b->irq_armed, 0);
+ intel_gt_pm_put_async(b->irq_engine->gt, wakeref);
}
static void intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
index 72dfd3748c4c..bdf09fd67b6e 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
@@ -13,6 +13,7 @@
#include <linux/types.h>
#include "intel_engine_types.h"
+#include "intel_wakeref.h"
/*
* Rather than have every client wait upon all user interrupts,
@@ -43,7 +44,7 @@ struct intel_breadcrumbs {
spinlock_t irq_lock; /* protects the interrupt from hardirq context */
struct irq_work irq_work; /* for use from inside irq_lock */
unsigned int irq_enabled;
- bool irq_armed;
+ intel_wakeref_t irq_armed;
/* Not all breadcrumbs are attached to physical HW */
intel_engine_mask_t engine_mask;
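With irq_armed widened from bool to intel_wakeref_t, one field now encodes both states; the invariant assumed by the arm/disarm paths above is:

	/* b->irq_armed == 0        : irq disarmed, no GT wakeref held
	 * b->irq_armed == <handle> : irq armed; the stored tracking handle
	 *                            is handed back to intel_gt_pm_put_async()
	 *                            in __intel_breadcrumbs_disarm_irq()
	 */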
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index a53b26178f0a..a2f1245741bb 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -6,6 +6,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
+#include "i915_drm_client.h"
#include "i915_drv.h"
#include "i915_trace.h"
@@ -50,6 +51,7 @@ intel_context_create(struct intel_engine_cs *engine)
int intel_context_alloc_state(struct intel_context *ce)
{
+ struct i915_gem_context *ctx;
int err = 0;
if (mutex_lock_interruptible(&ce->pin_mutex))
@@ -66,6 +68,18 @@ int intel_context_alloc_state(struct intel_context *ce)
goto unlock;
set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
+
+ rcu_read_lock();
+ ctx = rcu_dereference(ce->gem_context);
+ if (ctx && !kref_get_unless_zero(&ctx->ref))
+ ctx = NULL;
+ rcu_read_unlock();
+ if (ctx) {
+ if (ctx->client)
+ i915_drm_client_add_context_objects(ctx->client,
+ ce);
+ i915_gem_context_put(ctx);
+ }
}
unlock:
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index a80e3b7c24ff..25564c01507e 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -212,7 +212,7 @@ static inline void intel_context_enter(struct intel_context *ce)
return;
ce->ops->enter(ce);
- intel_gt_pm_get(ce->vm->gt);
+ ce->wakeref = intel_gt_pm_get(ce->vm->gt);
}
static inline void intel_context_mark_active(struct intel_context *ce)
@@ -229,7 +229,7 @@ static inline void intel_context_exit(struct intel_context *ce)
if (--ce->active_count)
return;
- intel_gt_pm_put_async(ce->vm->gt);
+ intel_gt_pm_put_async(ce->vm->gt, ce->wakeref);
ce->ops->exit(ce);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index aceaac28a33e..7eccbd70d89f 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -17,6 +17,7 @@
#include "i915_utils.h"
#include "intel_engine_types.h"
#include "intel_sseu.h"
+#include "intel_wakeref.h"
#include "uc/intel_guc_fwif.h"
@@ -112,6 +113,7 @@ struct intel_context {
u32 ring_size;
struct intel_ring *ring;
struct intel_timeline *timeline;
+ intel_wakeref_t wakeref;
unsigned long flags;
#define CONTEXT_BARRIER_BIT 0
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 4a11219e560e..40687806d22a 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -47,7 +47,7 @@
#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN11_LR_CONTEXT_RENDER_SIZE (14 * PAGE_SIZE)
-#define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE)
+#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
#define MAX_MMIO_BASES 3
struct engine_info {
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 9a527e1f5be6..1a8e2b7db013 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -188,7 +188,7 @@ static void heartbeat(struct work_struct *wrk)
* low latency and no jitter] the chance to naturally
* complete before being preempted.
*/
- attr.priority = 0;
+ attr.priority = I915_PRIORITY_NORMAL;
if (rq->sched.attr.priority >= attr.priority)
attr.priority = I915_PRIORITY_HEARTBEAT;
if (rq->sched.attr.priority >= attr.priority)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index e91fc881dbf1..96bdb93a948d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -63,7 +63,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
ENGINE_TRACE(engine, "\n");
- intel_gt_pm_get(engine->gt);
+ engine->wakeref_track = intel_gt_pm_get(engine->gt);
/* Discard stale context state from across idling */
ce = engine->kernel_context;
@@ -122,6 +122,7 @@ __queue_and_release_pm(struct i915_request *rq,
*/
GEM_BUG_ON(rq->context->active_count != 1);
__intel_gt_pm_get(engine->gt);
+ rq->context->wakeref = intel_wakeref_track(&engine->gt->wakeref);
/*
* We have to serialise all potential retirement paths with our
@@ -285,7 +286,7 @@ static int __engine_park(struct intel_wakeref *wf)
engine->park(engine);
/* While gt calls i915_vma_parked(), we have to break the lock cycle */
- intel_gt_pm_put_async(engine->gt);
+ intel_gt_pm_put_async(engine->gt, engine->wakeref_track);
return 0;
}
@@ -296,7 +297,7 @@ static const struct intel_wakeref_ops wf_ops = {
void intel_engine_init__pm(struct intel_engine_cs *engine)
{
- intel_wakeref_init(&engine->wakeref, engine->i915, &wf_ops);
+ intel_wakeref_init(&engine->wakeref, engine->i915, &wf_ops, engine->name);
intel_engine_init_heartbeat(engine);
intel_gsc_idle_msg_enable(engine);
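In __queue_and_release_pm() the raw __intel_gt_pm_get() returns nothing, so the tracking record is created by hand and stashed in the context, to be consumed when the context later exits. The two halves, as they appear elsewhere in this diff:

	/* acquire (engine parking path, above): */
	__intel_gt_pm_get(engine->gt);	/* untracked reference */
	rq->context->wakeref = intel_wakeref_track(&engine->gt->wakeref);

	/* release (intel_context_exit(), earlier in this diff): */
	intel_gt_pm_put_async(ce->vm->gt, ce->wakeref);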
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
index fdd4ddd3a978..a8eac59e3779 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
@@ -118,9 +118,15 @@
#define CCID_EXTENDED_STATE_RESTORE BIT(2)
#define CCID_EXTENDED_STATE_SAVE BIT(3)
#define RING_BB_PER_CTX_PTR(base) _MMIO((base) + 0x1c0) /* gen8+ */
+#define PER_CTX_BB_FORCE BIT(2)
+#define PER_CTX_BB_VALID BIT(0)
+
#define RING_INDIRECT_CTX(base) _MMIO((base) + 0x1c4) /* gen8+ */
#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base) + 0x1c8) /* gen8+ */
#define ECOSKPD(base) _MMIO((base) + 0x1d0)
+#define XEHP_BLITTER_SCHEDULING_MODE_MASK REG_GENMASK(12, 11)
+#define XEHP_BLITTER_ROUND_ROBIN_MODE \
+ REG_FIELD_PREP(XEHP_BLITTER_SCHEDULING_MODE_MASK, 1)
#define ECO_CONSTANT_BUFFER_SR_DISABLE REG_BIT(4)
#define ECO_GATING_CX_ONLY REG_BIT(3)
#define GEN6_BLITTER_FBC_NOTIFY REG_BIT(3)
@@ -257,5 +263,7 @@
#define VDBOX_CGCTL3F18(base) _MMIO((base) + 0x3f18)
#define ALNUNIT_CLKGATE_DIS REG_BIT(13)
+#define VDBOX_CGCTL3F1C(base) _MMIO((base) + 0x3f1c)
+#define MFXPIPE_CLKGATE_DIS REG_BIT(3)
#endif /* __INTEL_ENGINE_REGS__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 8769760257fd..960e6be2042f 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -446,7 +446,9 @@ struct intel_engine_cs {
unsigned long serial;
unsigned long wakeref_serial;
+ intel_wakeref_t wakeref_track;
struct intel_wakeref wakeref;
+
struct file *default_state;
struct {
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index 118164ddbb2e..833987015b8b 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -41,12 +41,15 @@ void intel_engine_add_user(struct intel_engine_cs *engine)
llist_add(&engine->uabi_llist, &engine->i915->uabi_engines_llist);
}
-static const u8 uabi_classes[] = {
+#define I915_NO_UABI_CLASS ((u16)(-1))
+
+static const u16 uabi_classes[] = {
[RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
[COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
[VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
[VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
[COMPUTE_CLASS] = I915_ENGINE_CLASS_COMPUTE,
+ [OTHER_CLASS] = I915_NO_UABI_CLASS, /* Not exposed to users, no uabi class. */
};
static int engine_cmp(void *priv, const struct list_head *A,
@@ -200,6 +203,7 @@ static void engine_rename(struct intel_engine_cs *engine, const char *name, u16
void intel_engines_driver_register(struct drm_i915_private *i915)
{
+ u16 name_instance, other_instance = 0;
struct legacy_ring ring = {};
struct list_head *it, *next;
struct rb_node **p, *prev;
@@ -216,27 +220,28 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
if (intel_gt_has_unrecoverable_error(engine->gt))
continue; /* ignore incomplete engines */
- /*
- * We don't want to expose the GSC engine to the users, but we
- * still rename it so it is easier to identify in the debug logs
- */
- if (engine->id == GSC0) {
- engine_rename(engine, "gsc", 0);
- continue;
- }
-
GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
engine->uabi_class = uabi_classes[engine->class];
+ if (engine->uabi_class == I915_NO_UABI_CLASS) {
+ name_instance = other_instance++;
+ } else {
+ GEM_BUG_ON(engine->uabi_class >=
+ ARRAY_SIZE(i915->engine_uabi_class_count));
+ name_instance =
+ i915->engine_uabi_class_count[engine->uabi_class]++;
+ }
+ engine->uabi_instance = name_instance;
- GEM_BUG_ON(engine->uabi_class >=
- ARRAY_SIZE(i915->engine_uabi_class_count));
- engine->uabi_instance =
- i915->engine_uabi_class_count[engine->uabi_class]++;
-
- /* Replace the internal name with the final user facing name */
+ /*
+ * Replace the internal name with the final user and log facing
+ * name.
+ */
engine_rename(engine,
intel_engine_class_repr(engine->class),
- engine->uabi_instance);
+ name_instance);
+
+ if (engine->uabi_class == I915_NO_UABI_CLASS)
+ continue;
rb_link_node(&engine->uabi_node, prev, p);
rb_insert_color(&engine->uabi_node, &i915->uabi_engines);
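Net effect of the rework above: OTHER_CLASS engines such as the GSC keep a stable, numbered log-facing name (drawn from the separate other_instance counter), but with I915_NO_UABI_CLASS they bail out before rb_link_node() and never appear in i915->uabi_engines. In sketch form:

	/* RENDER/COPY/VIDEO/VE/COMPUTE -> uabi class, counted per class,
	 *                                 linked into the uabi_engines tree
	 * OTHER_CLASS (e.g. GSC)       -> I915_NO_UABI_CLASS, renamed for
	 *                                 the logs only, never exposed
	 */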
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index e8f42ec6b1b4..42aade0faf2d 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -630,7 +630,7 @@ static void __execlists_schedule_out(struct i915_request * const rq,
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
if (engine->fw_domain && !--engine->fw_active)
intel_uncore_forcewake_put(engine->uncore, engine->fw_domain);
- intel_gt_pm_put_async(engine->gt);
+ intel_gt_pm_put_async_untracked(engine->gt);
/*
* If this is part of a virtual engine, its next request may
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 15fc8e4703f4..21a7e3191c18 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -245,16 +245,15 @@ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
gen8_ggtt_invalidate(ggtt);
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {
- if (intel_guc_tlb_invalidation_is_available(&gt->uc.guc)) {
+ if (intel_guc_tlb_invalidation_is_available(&gt->uc.guc))
guc_ggtt_ct_invalidate(gt);
- } else if (GRAPHICS_VER(i915) >= 12) {
+ else if (GRAPHICS_VER(i915) >= 12)
intel_uncore_write_fw(gt->uncore,
GEN12_GUC_TLB_INV_CR,
GEN12_GUC_TLB_INV_CR_INVALIDATE);
- } else {
+ else
intel_uncore_write_fw(gt->uncore,
GEN8_GTCR, GEN8_GTCR_INVALIDATE);
- }
}
}
@@ -297,7 +296,7 @@ static bool should_update_ggtt_with_bind(struct i915_ggtt *ggtt)
return intel_gt_is_bind_context_ready(gt);
}
-static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt)
+static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt, intel_wakeref_t *wakeref)
{
struct intel_context *ce;
struct intel_gt *gt = ggtt->vm.gt;
@@ -314,7 +313,8 @@ static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt)
* would conflict with fs_reclaim trying to allocate memory while
* doing rpm_resume().
*/
- if (!intel_gt_pm_get_if_awake(gt))
+ *wakeref = intel_gt_pm_get_if_awake(gt);
+ if (!*wakeref)
return NULL;
intel_engine_pm_get(ce->engine);
@@ -322,10 +322,10 @@ static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt)
return ce;
}
-static void gen8_ggtt_bind_put_ce(struct intel_context *ce)
+static void gen8_ggtt_bind_put_ce(struct intel_context *ce, intel_wakeref_t wakeref)
{
intel_engine_pm_put(ce->engine);
- intel_gt_pm_put(ce->engine->gt);
+ intel_gt_pm_put(ce->engine->gt, wakeref);
}
static bool gen8_ggtt_bind_ptes(struct i915_ggtt *ggtt, u32 offset,
@@ -338,12 +338,13 @@ static bool gen8_ggtt_bind_ptes(struct i915_ggtt *ggtt, u32 offset,
struct sgt_iter iter;
struct i915_request *rq;
struct intel_context *ce;
+ intel_wakeref_t wakeref;
u32 *cs;
if (!num_entries)
return true;
- ce = gen8_ggtt_bind_get_ce(ggtt);
+ ce = gen8_ggtt_bind_get_ce(ggtt, &wakeref);
if (!ce)
return false;
@@ -419,13 +420,13 @@ queue_err_rq:
offset += n_ptes;
}
- gen8_ggtt_bind_put_ce(ce);
+ gen8_ggtt_bind_put_ce(ce, wakeref);
return true;
err_rq:
i915_request_put(rq);
put_ce:
- gen8_ggtt_bind_put_ce(ce);
+ gen8_ggtt_bind_put_ce(ce, wakeref);
return false;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index ba1186fc524f..a425db5ed3a2 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -451,7 +451,7 @@ void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
spin_lock_irqsave(&uncore->lock, flags);
intel_uncore_posting_read_fw(uncore,
- RING_HEAD(RENDER_RING_BASE));
+ RING_TAIL(RENDER_RING_BASE));
spin_unlock_irqrestore(&uncore->lock, flags);
}
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index e1f13735f530..608f5c872928 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -82,6 +82,10 @@ struct drm_printer;
##__VA_ARGS__); \
} while (0)
+#define NEEDS_FASTCOLOR_BLT_WABB(engine) ( \
+ IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 55), IP_VER(12, 71)) && \
+ engine->class == COPY_ENGINE_CLASS && engine->instance == 0)
+
static inline bool gt_is_root(struct intel_gt *gt)
{
return !gt->info.id;
@@ -114,6 +118,11 @@ static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc)
return container_of(gsc, struct intel_gt, gsc);
}
+static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
+{
+ return guc_to_gt(guc)->i915;
+}
+
void intel_gt_common_init_early(struct intel_gt *gt);
int intel_root_gt_init_early(struct drm_i915_private *i915);
int intel_gt_assign_ggtt(struct intel_gt *gt);
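The new guc_to_i915() helper only shortens a pattern repeated across the GuC code; every conversion later in this diff has the same shape:

	struct drm_i915_private *i915;

	i915 = guc_to_gt(guc)->i915;	/* before: both hops spelled out */
	i915 = guc_to_i915(guc);	/* after: same result via the helper */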
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
index 34913912d8ae..e253750a51c5 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
@@ -388,8 +388,7 @@ void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags)
* registers. This wakeref will be released in the unlock
* routine.
*
- * This is expected to become a formally documented/numbered
- * workaround soon.
+ * Wa_22018931422
*/
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_GT);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index f5899d503e23..220ac4f92edf 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -28,19 +28,20 @@
static void user_forcewake(struct intel_gt *gt, bool suspend)
{
int count = atomic_read(&gt->user_wakeref);
+ intel_wakeref_t wakeref;
/* Inside suspend/resume so single threaded, no races to worry about. */
if (likely(!count))
return;
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
if (suspend) {
GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
atomic_sub(count, &gt->wakeref.count);
} else {
atomic_add(count, &gt->wakeref.count);
}
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
}
static void runtime_begin(struct intel_gt *gt)
@@ -138,7 +139,7 @@ void intel_gt_pm_init_early(struct intel_gt *gt)
* runtime_pm is per-device rather than per-tile, so this is still the
* correct structure.
*/
- intel_wakeref_init(&gt->wakeref, gt->i915, &wf_ops);
+ intel_wakeref_init(&gt->wakeref, gt->i915, &wf_ops, "GT");
seqcount_mutex_init(&gt->stats.lock, &gt->wakeref.mutex);
}
@@ -167,7 +168,7 @@ static void gt_sanitize(struct intel_gt *gt, bool force)
enum intel_engine_id id;
intel_wakeref_t wakeref;
- GT_TRACE(gt, "force:%s", str_yes_no(force));
+ GT_TRACE(gt, "force:%s\n", str_yes_no(force));
/* Use a raw wakeref to avoid calling intel_display_power_get early */
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
@@ -236,6 +237,7 @@ int intel_gt_resume(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err;
err = intel_gt_has_unrecoverable_error(gt);
@@ -252,7 +254,7 @@ int intel_gt_resume(struct intel_gt *gt)
*/
gt_sanitize(gt, true);
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
intel_rc6_sanitize(&gt->rc6);
@@ -295,7 +297,7 @@ int intel_gt_resume(struct intel_gt *gt)
out_fw:
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
intel_gt_bind_context_set_ready(gt);
return err;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index b1eeb5b33918..911fd0160221 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -16,19 +16,28 @@ static inline bool intel_gt_pm_is_awake(const struct intel_gt *gt)
return intel_wakeref_is_active(&gt->wakeref);
}
-static inline void intel_gt_pm_get(struct intel_gt *gt)
+static inline void intel_gt_pm_get_untracked(struct intel_gt *gt)
{
intel_wakeref_get(&gt->wakeref);
}
+static inline intel_wakeref_t intel_gt_pm_get(struct intel_gt *gt)
+{
+ intel_gt_pm_get_untracked(gt);
+ return intel_wakeref_track(&gt->wakeref);
+}
+
static inline void __intel_gt_pm_get(struct intel_gt *gt)
{
__intel_wakeref_get(&gt->wakeref);
}
-static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt)
+static inline intel_wakeref_t intel_gt_pm_get_if_awake(struct intel_gt *gt)
{
- return intel_wakeref_get_if_active(&gt->wakeref);
+ if (!intel_wakeref_get_if_active(&gt->wakeref))
+ return 0;
+
+ return intel_wakeref_track(&gt->wakeref);
}
static inline void intel_gt_pm_might_get(struct intel_gt *gt)
@@ -36,12 +45,18 @@ static inline void intel_gt_pm_might_get(struct intel_gt *gt)
intel_wakeref_might_get(&gt->wakeref);
}
-static inline void intel_gt_pm_put(struct intel_gt *gt)
+static inline void intel_gt_pm_put_untracked(struct intel_gt *gt)
{
intel_wakeref_put(&gt->wakeref);
}
-static inline void intel_gt_pm_put_async(struct intel_gt *gt)
+static inline void intel_gt_pm_put(struct intel_gt *gt, intel_wakeref_t handle)
+{
+ intel_wakeref_untrack(&gt->wakeref, handle);
+ intel_gt_pm_put_untracked(gt);
+}
+
+static inline void intel_gt_pm_put_async_untracked(struct intel_gt *gt)
{
intel_wakeref_put_async(&gt->wakeref);
}
@@ -51,9 +66,14 @@ static inline void intel_gt_pm_might_put(struct intel_gt *gt)
intel_wakeref_might_put(&gt->wakeref);
}
-#define with_intel_gt_pm(gt, tmp) \
- for (tmp = 1, intel_gt_pm_get(gt); tmp; \
- intel_gt_pm_put(gt), tmp = 0)
+static inline void intel_gt_pm_put_async(struct intel_gt *gt, intel_wakeref_t handle)
+{
+ intel_wakeref_untrack(&gt->wakeref, handle);
+ intel_gt_pm_put_async_untracked(gt);
+}
+
+#define with_intel_gt_pm(gt, wf) \
+ for (wf = intel_gt_pm_get(gt); wf; intel_gt_pm_put(gt, wf), wf = 0)
/**
* with_intel_gt_pm_if_awake - if GT is PM awake, get a reference to prevent
@@ -64,7 +84,7 @@ static inline void intel_gt_pm_might_put(struct intel_gt *gt)
* @wf: pointer to a temporary wakeref.
*/
#define with_intel_gt_pm_if_awake(gt, wf) \
- for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt), wf = 0)
+ for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt, wf), wf = 0)
static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
{
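Taken together, the header changes above split the GT PM interface into tracked and untracked variants. A minimal usage sketch of the tracked form, using only the functions declared in this hunk (do_hw_access() is a placeholder; the handle exists purely for wakeref leak tracking and must be returned on release):

	intel_wakeref_t wf;

	wf = intel_gt_pm_get(gt);	/* wake the GT, obtain a handle */
	do_hw_access(gt);
	intel_gt_pm_put(gt, wf);	/* release, pairing the handle */

	/* or scoped, via the reworked helper macro: */
	with_intel_gt_pm(gt, wf)
		do_hw_access(gt);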
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index f900cc68d6d9..7114c116e928 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -27,7 +27,7 @@
void intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt)
{
atomic_inc(&gt->user_wakeref);
- intel_gt_pm_get(gt);
+ intel_gt_pm_get_untracked(gt);
if (GRAPHICS_VER(gt->i915) >= 6)
intel_uncore_forcewake_user_get(gt->uncore);
}
@@ -36,7 +36,7 @@ void intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt)
{
if (GRAPHICS_VER(gt->i915) >= 6)
intel_uncore_forcewake_user_put(gt->uncore);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put_untracked(gt);
atomic_dec(&gt->user_wakeref);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index eecd0a87a647..9de41703fae5 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -537,6 +537,9 @@
#define XEHP_SQCM MCR_REG(0x8724)
#define EN_32B_ACCESS REG_BIT(30)
+#define MTL_GSCPSMI_BASEADDR_LSB _MMIO(0x880c)
+#define MTL_GSCPSMI_BASEADDR_MSB _MMIO(0x8810)
+
#define HSW_IDICR _MMIO(0x9008)
#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 4fbed27ef0ec..86f73fe558ca 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -63,6 +63,9 @@ struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
if (!IS_ERR(obj)) {
obj->base.resv = i915_vm_resv_get(vm);
obj->shares_resv_from = vm;
+
+ if (vm->fpriv)
+ i915_drm_client_add_object(vm->fpriv->client, obj);
}
return obj;
@@ -84,6 +87,9 @@ struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
if (!IS_ERR(obj)) {
obj->base.resv = i915_vm_resv_get(vm);
obj->shares_resv_from = vm;
+
+ if (vm->fpriv)
+ i915_drm_client_add_object(vm->fpriv->client, obj);
}
return obj;
@@ -95,6 +101,16 @@ int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
void *vaddr;
type = intel_gt_coherent_map_type(vm->gt, obj, true);
+ /*
+ * FIXME: A suspected Address Translation Service (ATS) issue in the
+ * IOMMU is causing CAT errors on some MTL workloads.
+ * Applying a write barrier to the ppgtt set entry functions appeared
+ * to have no effect, so we must temporarily use I915_MAP_WC here on
+ * MTL until a proper ATS solution is found.
+ */
+ if (IS_METEORLAKE(vm->i915))
+ type = I915_MAP_WC;
+
vaddr = i915_gem_object_pin_map_unlocked(obj, type);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
@@ -109,6 +125,16 @@ int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object
void *vaddr;
type = intel_gt_coherent_map_type(vm->gt, obj, true);
+ /*
+ * FIXME: A suspected Address Translation Service (ATS) issue in the
+ * IOMMU is causing CAT errors on some MTL workloads.
+ * Applying a write barrier to the ppgtt set entry functions appeared
+ * to have no effect, so we must temporarily use I915_MAP_WC here on
+ * MTL until a proper ATS solution is found.
+ */
+ if (IS_METEORLAKE(vm->i915))
+ type = I915_MAP_WC;
+
vaddr = i915_gem_object_pin_map(obj, type);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index b471edac2699..6b85222ee3ea 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -249,8 +249,13 @@ struct i915_address_space {
struct work_struct release_work;
struct drm_mm mm;
+ struct {
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ } rsvd;
struct intel_gt *gt;
struct drm_i915_private *i915;
+ struct drm_i915_file_private *fpriv;
struct device *dma;
u64 total; /* size addr space maps (ex. 2GB for ggtt) */
u64 reserved; /* size addr space reserved */
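The new vm->fpriv back-pointer is what lets alloc_pt_lmem()/alloc_pt_dma() earlier in this diff charge page-table objects to a DRM client for fdinfo. Where the field is assigned lies outside this section; the assumed wiring is roughly:

	/* Hypothetical assignment site, not shown in this diff: */
	vm->fpriv = file_priv;	/* ppgtt created on behalf of a file */
	/* ...after which the allocators above can do: */
	i915_drm_client_add_object(vm->fpriv->client, obj);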
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index eaf66d903166..7c367ba8d9dc 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -829,6 +829,18 @@ lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
}
static void
+lrc_setup_bb_per_ctx(u32 *regs,
+ const struct intel_engine_cs *engine,
+ u32 ctx_bb_ggtt_addr)
+{
+ GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1);
+ regs[lrc_ring_wa_bb_per_ctx(engine) + 1] =
+ ctx_bb_ggtt_addr |
+ PER_CTX_BB_FORCE |
+ PER_CTX_BB_VALID;
+}
+
+static void
lrc_setup_indirect_ctx(u32 *regs,
const struct intel_engine_cs *engine,
u32 ctx_bb_ggtt_addr,
@@ -1020,7 +1032,13 @@ static u32 context_wa_bb_offset(const struct intel_context *ce)
return PAGE_SIZE * ce->wa_bb_page;
}
-static u32 *context_indirect_bb(const struct intel_context *ce)
+/*
+ * per_ctx below determines which WABB section is used.
+ * When true, the function returns the location of the
+ * PER_CTX_BB. When false, the function returns the
+ * location of the INDIRECT_CTX.
+ */
+static u32 *context_wabb(const struct intel_context *ce, bool per_ctx)
{
void *ptr;
@@ -1029,6 +1047,7 @@ static u32 *context_indirect_bb(const struct intel_context *ce)
ptr = ce->lrc_reg_state;
ptr -= LRC_STATE_OFFSET; /* back to start of context image */
ptr += context_wa_bb_offset(ce);
+ ptr += per_ctx ? PAGE_SIZE : 0;
return ptr;
}
@@ -1105,7 +1124,8 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
if (GRAPHICS_VER(engine->i915) >= 12) {
ce->wa_bb_page = context_size / PAGE_SIZE;
- context_size += PAGE_SIZE;
+ /* INDIRECT_CTX and PER_CTX_BB need separate pages. */
+ context_size += PAGE_SIZE * 2;
}
if (intel_context_is_parent(ce) && intel_engine_uses_guc(engine)) {
@@ -1407,12 +1427,85 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
return gen12_emit_aux_table_inv(ce->engine, cs);
}
+static u32 *xehp_emit_fastcolor_blt_wabb(const struct intel_context *ce, u32 *cs)
+{
+ struct intel_gt *gt = ce->engine->gt;
+ int mocs = gt->mocs.uc_index << 1;
+
+ /*
+ * Wa_16018031267 / Wa_16018063123 require that SW force the
+ * main copy engine arbitration into round robin mode. We
+ * additionally need to submit the following WABB blt command
+ * to produce 4 subblits with each subblit generating 0 byte
+ * write requests as WABB:
+ *
+ * XY_FASTCOLOR_BLT
+ * BG0 -> 5100000E
+ * BG1 -> 0000003F (Dest pitch)
+ * BG2 -> 00000000 (X1, Y1) = (0, 0)
+ * BG3 -> 00040001 (X2, Y2) = (1, 4)
+ * BG4 -> scratch
+ * BG5 -> scratch
+ * BG6-12 -> 00000000
+ * BG13 -> 20004004 (Surf. Width = 2, Surf. Height = 5)
+ * BG14 -> 00000010 (Qpitch = 4)
+ * BG15 -> 00000000
+ */
+ *cs++ = XY_FAST_COLOR_BLT_CMD | (16 - 2);
+ *cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) | 0x3f;
+ *cs++ = 0;
+ *cs++ = 4 << 16 | 1;
+ *cs++ = lower_32_bits(i915_vma_offset(ce->vm->rsvd.vma));
+ *cs++ = upper_32_bits(i915_vma_offset(ce->vm->rsvd.vma));
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0x20004004;
+ *cs++ = 0x10;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *
+xehp_emit_per_ctx_bb(const struct intel_context *ce, u32 *cs)
+{
+ /* Wa_16018031267, Wa_16018063123 */
+ if (NEEDS_FASTCOLOR_BLT_WABB(ce->engine))
+ cs = xehp_emit_fastcolor_blt_wabb(ce, cs);
+
+ return cs;
+}
+
+static void
+setup_per_ctx_bb(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ u32 *(*emit)(const struct intel_context *, u32 *))
+{
+ /* Place PER_CTX_BB on next page after INDIRECT_CTX */
+ u32 * const start = context_wabb(ce, true);
+ u32 *cs;
+
+ cs = emit(ce, start);
+
+ /* PER_CTX_BB must be terminated manually */
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ GEM_BUG_ON(cs - start > I915_GTT_PAGE_SIZE / sizeof(*cs));
+ lrc_setup_bb_per_ctx(ce->lrc_reg_state, engine,
+ lrc_indirect_bb(ce) + PAGE_SIZE);
+}
+
static void
setup_indirect_ctx_bb(const struct intel_context *ce,
const struct intel_engine_cs *engine,
u32 *(*emit)(const struct intel_context *, u32 *))
{
- u32 * const start = context_indirect_bb(ce);
+ u32 * const start = context_wabb(ce, false);
u32 *cs;
cs = emit(ce, start);
@@ -1511,6 +1604,7 @@ u32 lrc_update_regs(const struct intel_context *ce,
/* Mutually exclusive wrt to global indirect bb */
GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size);
setup_indirect_ctx_bb(ce, engine, fn);
+ setup_per_ctx_bb(ce, engine, xehp_emit_per_ctx_bb);
}
return lrc_descriptor(ce) | CTX_DESC_FORCE_RESTORE;
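For orientation, the context-image layout implied by the changes above (context_size grows by two pages, and context_wabb() steps one extra page for the per-context buffer):

	/* [ LRC register state ][ INDIRECT_CTX wa bb : 1 page ]
	 *                       [ PER_CTX_BB        : 1 page ]
	 *
	 * context_wabb(ce, false) -> start of the INDIRECT_CTX page
	 * context_wabb(ce, true)  -> start of the PER_CTX_BB page, which
	 *                            must end in MI_BATCH_BUFFER_END (see
	 *                            setup_per_ctx_bb() above)
	 */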
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index d5ed904f355d..6801f8b95c53 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1293,7 +1293,7 @@ int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
if (msg)
drm_notice(&engine->i915->drm,
"Resetting %s for %s\n", engine->name, msg);
- atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
+ i915_increase_reset_engine_count(&engine->i915->gpu_error, engine);
ret = intel_gt_reset_engine(engine);
if (ret) {
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index f602895f6d0d..6a3246240e81 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -849,13 +849,12 @@ void intel_sseu_print_topology(struct drm_i915_private *i915,
const struct sseu_dev_info *sseu,
struct drm_printer *p)
{
- if (sseu->max_slices == 0) {
+ if (sseu->max_slices == 0)
drm_printf(p, "Unavailable\n");
- } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+ else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
sseu_print_xehp_topology(sseu, p);
- } else {
+ else
sseu_print_hsw_topology(sseu, p);
- }
}
void intel_sseu_print_ss_info(const char *type,
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 192ac0e59afa..4cbf9e512645 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -1663,8 +1663,22 @@ xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
}
static void
+wa_16021867713(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ struct intel_engine_cs *engine;
+ int id;
+
+ for_each_engine(engine, gt, id)
+ if (engine->class == VIDEO_DECODE_CLASS)
+ wa_write_or(wal, VDBOX_CGCTL3F1C(engine->mmio_base),
+ MFXPIPE_CLKGATE_DIS);
+}
+
+static void
xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
+ wa_16021867713(gt, wal);
+
/*
* Wa_14018778641
* Wa_18018781329
@@ -1674,6 +1688,9 @@ xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
*/
wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB);
+ /* Wa_22016670082 */
+ wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);
+
debug_dump_steering(gt);
}
@@ -2340,14 +2357,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
0, true);
}
- if (IS_DG2_G11(i915) || IS_DG2_G10(i915)) {
- /* Wa_22014600077:dg2 */
- wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
- _MASKED_BIT_ENABLE(ENABLE_EU_COUNT_FOR_TDL_FLUSH),
- 0 /* Wa_14012342262 write-only reg, so skip verification */,
- true);
- }
-
if (IS_DG2(i915) || IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
/*
@@ -2782,6 +2791,11 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
RING_SEMA_WAIT_POLL(engine->mmio_base),
1);
}
+ /* Wa_16018031267, Wa_16018063123 */
+ if (NEEDS_FASTCOLOR_BLT_WABB(engine))
+ wa_masked_field_set(wal, ECOSKPD(engine->mmio_base),
+ XEHP_BLITTER_SCHEDULING_MODE_MASK,
+ XEHP_BLITTER_ROUND_ROBIN_MODE);
}
static void
@@ -2915,6 +2929,9 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
* Wa_22015475538:dg2
*/
wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
+
+ /* Wa_18028616096 */
+ wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, UGM_FRAGMENT_THRESHOLD_TO_3);
}
if (IS_DG2_G11(i915)) {
@@ -2943,11 +2960,6 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
true);
}
- if (IS_DG2_G10(i915) || IS_DG2_G12(i915)) {
- /* Wa_18028616096 */
- wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, UGM_FRAGMENT_THRESHOLD_TO_3);
- }
-
if (IS_XEHPSDV(i915)) {
/* Wa_1409954639 */
wa_mcr_masked_en(wal,
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index 86cecf7a1105..5ffa5e30f419 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -21,20 +21,22 @@ static int cmp_u32(const void *A, const void *B)
return *a - *b;
}
-static void perf_begin(struct intel_gt *gt)
+static intel_wakeref_t perf_begin(struct intel_gt *gt)
{
- intel_gt_pm_get(gt);
+ intel_wakeref_t wakeref = intel_gt_pm_get(gt);
/* Boost gpufreq to max [waitboost] and keep it fixed */
atomic_inc(&gt->rps.num_waiters);
queue_work(gt->i915->unordered_wq, &gt->rps.work);
flush_work(&gt->rps.work);
+
+ return wakeref;
}
-static int perf_end(struct intel_gt *gt)
+static int perf_end(struct intel_gt *gt, intel_wakeref_t wakeref)
{
atomic_dec(&gt->rps.num_waiters);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
return igt_flush_test(gt->i915);
}
@@ -133,12 +135,13 @@ static int perf_mi_bb_start(void *arg)
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err = 0;
if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
return 0;
- perf_begin(gt);
+ wakeref = perf_begin(gt);
for_each_engine(engine, gt, id) {
struct intel_context *ce = engine->kernel_context;
struct i915_vma *batch;
@@ -207,7 +210,7 @@ out:
pr_info("%s: MI_BB_START cycles: %u\n",
engine->name, trifilter(cycles));
}
- if (perf_end(gt))
+ if (perf_end(gt, wakeref))
err = -EIO;
return err;
@@ -260,12 +263,13 @@ static int perf_mi_noop(void *arg)
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err = 0;
if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
return 0;
- perf_begin(gt);
+ wakeref = perf_begin(gt);
for_each_engine(engine, gt, id) {
struct intel_context *ce = engine->kernel_context;
struct i915_vma *base, *nop;
@@ -364,7 +368,7 @@ out:
pr_info("%s: 16K MI_NOOP cycles: %u\n",
engine->name, trifilter(cycles));
}
- if (perf_end(gt))
+ if (perf_end(gt, wakeref))
err = -EIO;
return err;
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index 273d440a53e3..bc441ce7b380 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -84,7 +84,7 @@ static struct pulse *pulse_create(void)
static void pulse_unlock_wait(struct pulse *p)
{
- i915_active_unlock_wait(&p->active);
+ wait_var_event_timeout(&p->active, i915_active_is_idle(&p->active), HZ);
}
static int __live_idle_pulse(struct intel_engine_cs *engine,
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index 0971241707ce..33351deeea4f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -81,6 +81,7 @@ static int live_gt_clocks(void *arg)
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err = 0;
if (!gt->clock_frequency) { /* unknown */
@@ -91,7 +92,7 @@ static int live_gt_clocks(void *arg)
if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
return 0;
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
for_each_engine(engine, gt, id) {
@@ -128,7 +129,7 @@ static int live_gt_clocks(void *arg)
}
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 5f826b6dcf5d..e17b8777d21d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -1555,7 +1555,7 @@ static int live_lrc_isolation(void *arg)
return err;
}
-static int indirect_ctx_submit_req(struct intel_context *ce)
+static int wabb_ctx_submit_req(struct intel_context *ce)
{
struct i915_request *rq;
int err = 0;
@@ -1579,7 +1579,8 @@ static int indirect_ctx_submit_req(struct intel_context *ce)
#define CTX_BB_CANARY_INDEX (CTX_BB_CANARY_OFFSET / sizeof(u32))
static u32 *
-emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
+emit_wabb_ctx_canary(const struct intel_context *ce,
+ u32 *cs, bool per_ctx)
{
*cs++ = MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT |
@@ -1587,26 +1588,43 @@ emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
*cs++ = i915_mmio_reg_offset(RING_START(0));
*cs++ = i915_ggtt_offset(ce->state) +
context_wa_bb_offset(ce) +
- CTX_BB_CANARY_OFFSET;
+ CTX_BB_CANARY_OFFSET +
+ (per_ctx ? PAGE_SIZE : 0);
*cs++ = 0;
return cs;
}
+static u32 *
+emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
+{
+ return emit_wabb_ctx_canary(ce, cs, false);
+}
+
+static u32 *
+emit_per_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
+{
+ return emit_wabb_ctx_canary(ce, cs, true);
+}
+
static void
-indirect_ctx_bb_setup(struct intel_context *ce)
+wabb_ctx_setup(struct intel_context *ce, bool per_ctx)
{
- u32 *cs = context_indirect_bb(ce);
+ u32 *cs = context_wabb(ce, per_ctx);
cs[CTX_BB_CANARY_INDEX] = 0xdeadf00d;
- setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary);
+ if (per_ctx)
+ setup_per_ctx_bb(ce, ce->engine, emit_per_ctx_bb_canary);
+ else
+ setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary);
}
-static bool check_ring_start(struct intel_context *ce)
+static bool check_ring_start(struct intel_context *ce, bool per_ctx)
{
const u32 * const ctx_bb = (void *)(ce->lrc_reg_state) -
- LRC_STATE_OFFSET + context_wa_bb_offset(ce);
+ LRC_STATE_OFFSET + context_wa_bb_offset(ce) +
+ (per_ctx ? PAGE_SIZE : 0);
if (ctx_bb[CTX_BB_CANARY_INDEX] == ce->lrc_reg_state[CTX_RING_START])
return true;
@@ -1618,21 +1636,21 @@ static bool check_ring_start(struct intel_context *ce)
return false;
}
-static int indirect_ctx_bb_check(struct intel_context *ce)
+static int wabb_ctx_check(struct intel_context *ce, bool per_ctx)
{
int err;
- err = indirect_ctx_submit_req(ce);
+ err = wabb_ctx_submit_req(ce);
if (err)
return err;
- if (!check_ring_start(ce))
+ if (!check_ring_start(ce, per_ctx))
return -EINVAL;
return 0;
}
-static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine)
+static int __lrc_wabb_ctx(struct intel_engine_cs *engine, bool per_ctx)
{
struct intel_context *a, *b;
int err;
@@ -1667,14 +1685,14 @@ static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine)
* As ring start is restored apriori of starting the indirect ctx bb and
* as it will be different for each context, it fits to this purpose.
*/
- indirect_ctx_bb_setup(a);
- indirect_ctx_bb_setup(b);
+ wabb_ctx_setup(a, per_ctx);
+ wabb_ctx_setup(b, per_ctx);
- err = indirect_ctx_bb_check(a);
+ err = wabb_ctx_check(a, per_ctx);
if (err)
goto unpin_b;
- err = indirect_ctx_bb_check(b);
+ err = wabb_ctx_check(b, per_ctx);
unpin_b:
intel_context_unpin(b);
@@ -1688,7 +1706,7 @@ put_a:
return err;
}
-static int live_lrc_indirect_ctx_bb(void *arg)
+static int lrc_wabb_ctx(void *arg, bool per_ctx)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
@@ -1697,7 +1715,7 @@ static int live_lrc_indirect_ctx_bb(void *arg)
for_each_engine(engine, gt, id) {
intel_engine_pm_get(engine);
- err = __live_lrc_indirect_ctx_bb(engine);
+ err = __lrc_wabb_ctx(engine, per_ctx);
intel_engine_pm_put(engine);
if (igt_flush_test(gt->i915))
@@ -1710,6 +1728,16 @@ static int live_lrc_indirect_ctx_bb(void *arg)
return err;
}
+static int live_lrc_indirect_ctx_bb(void *arg)
+{
+ return lrc_wabb_ctx(arg, false);
+}
+
+static int live_lrc_per_ctx_bb(void *arg)
+{
+ return lrc_wabb_ctx(arg, true);
+}
+
static void garbage_reset(struct intel_engine_cs *engine,
struct i915_request *rq)
{
@@ -1947,6 +1975,7 @@ int intel_lrc_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_lrc_garbage),
SUBTEST(live_pphwsp_runtime),
SUBTEST(live_lrc_indirect_ctx_bb),
+ SUBTEST(live_lrc_per_ctx_bb),
};
if (!HAS_LOGICAL_RING_CONTEXTS(i915))
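What the renamed canary machinery checks, in short: each WABB variant SRMs the engine's RING_START into a known slot of its own page, and check_ring_start() compares that slot with CTX_RING_START in the context image, proving the expected page really ran for that context:

	/* Canary slot per variant (from the hunks above):
	 *   indirect-ctx: context_wa_bb_offset(ce) + CTX_BB_CANARY_OFFSET
	 *   per-ctx:      same + PAGE_SIZE
	 * pass: ctx_bb[CTX_BB_CANARY_INDEX] == lrc_reg_state[CTX_RING_START]
	 */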
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 79aa6ac66ad2..f40de408cd3a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -261,11 +261,12 @@ static int igt_atomic_reset(void *arg)
{
struct intel_gt *gt = arg;
const typeof(*igt_atomic_phases) *p;
+ intel_wakeref_t wakeref;
int err = 0;
/* Check that the resets are usable from atomic context */
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
igt_global_reset_lock(gt);
/* Flush any requests before we get started and check basics */
@@ -296,7 +297,7 @@ static int igt_atomic_reset(void *arg)
unlock:
igt_global_reset_unlock(gt);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
return err;
}
@@ -307,6 +308,7 @@ static int igt_atomic_engine_reset(void *arg)
const typeof(*igt_atomic_phases) *p;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err = 0;
/* Check that the resets are usable from atomic context */
@@ -317,7 +319,7 @@ static int igt_atomic_engine_reset(void *arg)
if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
igt_global_reset_lock(gt);
/* Flush any requests before we get started and check basics */
@@ -365,7 +367,7 @@ static int igt_atomic_engine_reset(void *arg)
out_unlock:
igt_global_reset_unlock(gt);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index fb30f733b036..dcef8d498919 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -224,6 +224,7 @@ int live_rps_clock_interval(void *arg)
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_spinner spin;
+ intel_wakeref_t wakeref;
int err = 0;
if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6)
@@ -236,7 +237,7 @@ int live_rps_clock_interval(void *arg)
saved_work = rps->work.func;
rps->work.func = dummy_rps_work;
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
intel_rps_disable(&gt->rps);
intel_gt_check_clock_frequency(gt);
@@ -355,7 +356,7 @@ int live_rps_clock_interval(void *arg)
}
intel_rps_enable(&gt->rps);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
igt_spinner_fini(&spin);
@@ -376,6 +377,7 @@ int live_rps_control(void *arg)
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_spinner spin;
+ intel_wakeref_t wakeref;
int err = 0;
/*
@@ -398,7 +400,7 @@ int live_rps_control(void *arg)
saved_work = rps->work.func;
rps->work.func = dummy_rps_work;
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
for_each_engine(engine, gt, id) {
struct i915_request *rq;
ktime_t min_dt, max_dt;
@@ -488,7 +490,7 @@ int live_rps_control(void *arg)
break;
}
}
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
igt_spinner_fini(&spin);
@@ -1023,6 +1025,7 @@ int live_rps_interrupt(void *arg)
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_spinner spin;
+ intel_wakeref_t wakeref;
u32 pm_events;
int err = 0;
@@ -1033,9 +1036,9 @@ int live_rps_interrupt(void *arg)
if (!intel_rps_has_interrupts(rps) || GRAPHICS_VER(gt->i915) < 6)
return 0;
- intel_gt_pm_get(gt);
- pm_events = rps->pm_events;
- intel_gt_pm_put(gt);
+ pm_events = 0;
+ with_intel_gt_pm(gt, wakeref)
+ pm_events = rps->pm_events;
if (!pm_events) {
pr_err("No RPS PM events registered, but RPS is enabled?\n");
return -ENODEV;
diff --git a/drivers/gpu/drm/i915/gt/selftest_slpc.c b/drivers/gpu/drm/i915/gt/selftest_slpc.c
index 952c8d52d68a..302d0540295d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_slpc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_slpc.c
@@ -266,6 +266,7 @@ static int run_test(struct intel_gt *gt, int test_type)
struct intel_rps *rps = &gt->rps;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
struct igt_spinner spin;
u32 slpc_min_freq, slpc_max_freq;
int err = 0;
@@ -311,7 +312,7 @@ static int run_test(struct intel_gt *gt, int test_type)
}
intel_gt_pm_wait_for_idle(gt);
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
for_each_engine(engine, gt, id) {
struct i915_request *rq;
u32 max_act_freq;
@@ -397,7 +398,7 @@ static int run_test(struct intel_gt *gt, int test_type)
if (igt_flush_test(gt->i915))
err = -EIO;
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
igt_spinner_fini(&spin);
intel_gt_pm_wait_for_idle(gt);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
index 5f138de3c14f..40817ebcca71 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
@@ -322,6 +322,7 @@ static int i915_gsc_proxy_component_bind(struct device *i915_kdev,
gsc->proxy.component = data;
gsc->proxy.component->mei_dev = mei_kdev;
mutex_unlock(&gsc->proxy.mutex);
+ gt_dbg(gt, "GSC proxy mei component bound\n");
return 0;
}
@@ -342,6 +343,7 @@ static void i915_gsc_proxy_component_unbind(struct device *i915_kdev,
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
intel_uncore_rmw(gt->uncore, HECI_H_CSR(MTL_GSC_HECI2_BASE),
HECI_H_CSR_IE | HECI_H_CSR_RST, 0);
+ gt_dbg(gt, "GSC proxy mei component unbound\n");
}
static const struct component_ops i915_gsc_proxy_component_ops = {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 3f3df1166b86..2b450c43bbd7 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -330,7 +330,7 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
static u32 guc_ctl_devid(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
return (INTEL_DEVID(i915) << 16) | INTEL_REVID(i915);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 2b6dfe62c8f2..e22c12ce245a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -297,6 +297,10 @@ struct intel_guc {
* @number_guc_id_stolen: The number of guc_ids that have been stolen
*/
int number_guc_id_stolen;
+ /**
+ * @fast_response_selftest: Backdoor to CT handler for fast response selftest
+ */
+ u32 fast_response_selftest;
#endif
};
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
index a4da0208c883..a1cd40d80517 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -355,7 +355,7 @@ guc_capture_alloc_steered_lists(struct intel_guc *guc,
static const struct __guc_mmio_reg_descr_group *
guc_capture_get_device_reglist(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
const struct __guc_mmio_reg_descr_group *lists;
if (GRAPHICS_VER(i915) >= 12)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 89e314b3756b..0d5197c0824a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -265,7 +265,7 @@ int intel_guc_ct_init(struct intel_guc_ct *ct)
u32 *cmds;
int err;
- err = i915_inject_probe_error(guc_to_gt(guc)->i915, -ENXIO);
+ err = i915_inject_probe_error(guc_to_i915(guc), -ENXIO);
if (err)
return err;
@@ -1076,6 +1076,15 @@ static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *r
found = true;
break;
}
+
+#ifdef CONFIG_DRM_I915_SELFTEST
+ if (!found && ct_to_guc(ct)->fast_response_selftest) {
+ CT_DEBUG(ct, "Assuming unsolicited response due to FAST_REQUEST selftest\n");
+ ct_to_guc(ct)->fast_response_selftest++;
+ found = true;
+ }
+#endif
+
if (!found) {
CT_ERROR(ct, "Unsolicited response message: len %u, data %#x (fence %u, last %u)\n",
len, hxg[0], fence, ct->requests.last_fence);
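The fast_response_selftest counter declared in intel_guc.h acts as a backdoor into this handler. The selftest itself is not part of this section; the presumed flow (an assumption, not confirmed by this diff) is:

	/* guc->fast_response_selftest = 1;   arm the backdoor, then send
	 * an H2G FAST_REQUEST and wait until the handler above bumps the
	 * counter past 1, instead of logging an unsolicited-response error.
	 */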
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 55bc8b55fbc0..bf16351c9349 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -520,7 +520,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log)
static int guc_log_relay_create(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
struct rchan *guc_log_relay_chan;
size_t n_subbufs, subbuf_size;
int ret;
@@ -573,7 +573,7 @@ static void guc_log_relay_destroy(struct intel_guc_log *log)
static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
intel_wakeref_t wakeref;
_guc_log_copy_debuglogs_for_relay(log);
@@ -589,7 +589,7 @@ static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
static u32 __get_default_log_level(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
/* A negative value means "use platform/config default" */
if (i915->params.guc_log_level < 0) {
@@ -664,7 +664,7 @@ void intel_guc_log_destroy(struct intel_guc_log *log)
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
intel_wakeref_t wakeref;
int ret = 0;
@@ -796,7 +796,7 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
static void guc_log_relay_stop(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
if (!log->relay.started)
return;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
index 1adec6de223c..9df7927304ae 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
@@ -14,7 +14,7 @@ static bool __guc_rc_supported(struct intel_guc *guc)
{
/* GuC RC is unavailable for pre-Gen12 */
return guc->submission_supported &&
- GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
+ GRAPHICS_VER(guc_to_i915(guc)) >= 12;
}
static bool __guc_rc_selected(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 2dfb07cc4b33..3e681ab6fbf9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -34,7 +34,7 @@ static bool __detect_slpc_supported(struct intel_guc *guc)
{
/* GuC SLPC is unavailable for pre-Gen12 */
return guc->submission_supported &&
- GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
+ GRAPHICS_VER(guc_to_i915(guc)) >= 12;
}
static bool __guc_slpc_selected(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index d37698bd6b91..a259f1118c5a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1107,7 +1107,7 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
if (deregister)
guc_signal_context_fence(ce);
if (destroyed) {
- intel_gt_pm_put_async(guc_to_gt(guc));
+ intel_gt_pm_put_async_untracked(guc_to_gt(guc));
release_guc_id(guc, ce);
__guc_context_destroy(ce);
}
@@ -1303,6 +1303,7 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
unsigned long flags;
u32 reset_count;
bool in_reset;
+ intel_wakeref_t wakeref;
spin_lock_irqsave(&guc->timestamp.lock, flags);
@@ -1325,7 +1326,8 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
* start_gt_clk is derived from GuC state. To get a consistent
* view of activity, we query the GuC state only if gt is awake.
*/
- if (!in_reset && intel_gt_pm_get_if_awake(gt)) {
+ wakeref = in_reset ? 0 : intel_gt_pm_get_if_awake(gt);
+ if (wakeref) {
stats_saved = *stats;
gt_stamp_saved = guc->timestamp.gt_stamp;
/*
@@ -1334,7 +1336,7 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
*/
guc_update_engine_gt_clks(engine);
guc_update_pm_timestamp(guc, now);
- intel_gt_pm_put_async(gt);
+ intel_gt_pm_put_async(gt, wakeref);
if (i915_reset_count(gpu_error) != reset_count) {
*stats = stats_saved;
guc->timestamp.gt_stamp = gt_stamp_saved;
@@ -3385,9 +3387,9 @@ static void destroyed_worker_func(struct work_struct *w)
struct intel_guc *guc = container_of(w, struct intel_guc,
submission_state.destroyed_worker);
struct intel_gt *gt = guc_to_gt(guc);
- int tmp;
+ intel_wakeref_t wakeref;
- with_intel_gt_pm(gt, tmp)
+ with_intel_gt_pm(gt, wakeref)
deregister_destroyed_contexts(guc);
}
@@ -4624,12 +4626,12 @@ static bool __guc_submission_supported(struct intel_guc *guc)
{
/* GuC submission is unavailable for pre-Gen11 */
return intel_guc_is_supported(guc) &&
- GRAPHICS_VER(guc_to_gt(guc)->i915) >= 11;
+ GRAPHICS_VER(guc_to_i915(guc)) >= 11;
}
static bool __guc_submission_selected(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
if (!intel_guc_submission_is_supported(guc))
return false;
@@ -4894,7 +4896,7 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
intel_context_put(ce);
} else if (context_destroyed(ce)) {
/* Context has been destroyed */
- intel_gt_pm_put_async(guc_to_gt(guc));
+ intel_gt_pm_put_async_untracked(guc_to_gt(guc));
release_guc_id(guc, ce);
__guc_context_destroy(ce);
}
@@ -5001,7 +5003,8 @@ static void capture_error_state(struct intel_guc *guc,
if (match) {
intel_engine_set_hung_context(e, ce);
engine_mask |= e->mask;
- atomic_inc(&i915->gpu_error.reset_engine_count[e->uabi_class]);
+ i915_increase_reset_engine_count(&i915->gpu_error, e);
}
}
@@ -5013,7 +5016,7 @@ static void capture_error_state(struct intel_guc *guc,
} else {
intel_engine_set_hung_context(ce->engine, ce);
engine_mask = ce->engine->mask;
- atomic_inc(&i915->gpu_error.reset_engine_count[ce->engine->uabi_class]);
+ i915_increase_reset_engine_count(&i915->gpu_error, ce->engine);
}
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
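With intel_wakeref_t now carrying a real tracker handle (see the intel_wakeref.h hunks later in this diff), every conditional GT power reference follows the get-if-awake/put pairing that the busyness and worker hunks above convert to; a minimal sketch, using the names from this diff:

	intel_wakeref_t wakeref;

	wakeref = intel_gt_pm_get_if_awake(gt);	/* 0 if the GT is parked */
	if (wakeref) {
		/* GT is guaranteed awake here: safe to sample HW state. */
		intel_gt_pm_put_async(gt, wakeref);	/* release the same handle */
	}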
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 27f6561dd731..3872d309ed31 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -106,11 +106,6 @@ static void __confirm_options(struct intel_uc *uc)
gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
i915->params.enable_guc, "GuC is not supported!");
- if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC &&
- !intel_uc_supports_huc(uc))
- gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
- i915->params.enable_guc, "HuC is not supported!");
-
if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION &&
!intel_uc_supports_guc_submission(uc))
gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index bfb72143566f..c900aac85adb 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -286,11 +286,126 @@ err_wakeref:
return ret;
}
+/*
+ * Send a context schedule H2G message with an invalid context id.
+ * This should generate a GUC_RESULT_INVALID_CONTEXT response.
+ */
+static int bad_h2g(struct intel_guc *guc)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_SCHED_CONTEXT,
+ 0x12345678,
+ };
+
+ return intel_guc_send_nb(guc, action, ARRAY_SIZE(action), 0);
+}
+
+/*
+ * Set a spinner running to make sure the system is alive and active,
+ * then send a bad but asynchronous H2G command and wait to see if an
+ * error response is returned. If no response is received or if the
+ * spinner dies then the test will fail.
+ */
+#define FAST_RESPONSE_TIMEOUT_MS 1000
+static int intel_guc_fast_request(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_context *ce;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ intel_wakeref_t wakeref;
+ struct intel_engine_cs *engine = intel_selftest_find_any_engine(gt);
+ bool spinning = false;
+ int ret = 0;
+
+ if (!engine)
+ return 0;
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
+ gt_err(gt, "Failed to create spinner request: %pe\n", ce);
+ goto err_pm;
+ }
+
+ ret = igt_spinner_init(&spin, engine->gt);
+ if (ret) {
+ gt_err(gt, "Failed to create spinner: %pe\n", ERR_PTR(ret));
+ intel_context_put(ce); /* ce is not consumed until the spinner request is created */
+ goto err_pm;
+ }
+ spinning = true;
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ gt_err(gt, "Failed to create spinner request: %pe\n", rq);
+ goto err_spin;
+ }
+
+ ret = request_add_spin(rq, &spin);
+ if (ret) {
+ gt_err(gt, "Failed to add Spinner request: %pe\n", ERR_PTR(ret));
+ goto err_rq;
+ }
+
+ gt->uc.guc.fast_response_selftest = 1;
+
+ ret = bad_h2g(&gt->uc.guc);
+ if (ret) {
+ gt_err(gt, "Failed to send H2G: %pe\n", ERR_PTR(ret));
+ goto err_rq;
+ }
+
+ ret = wait_for(gt->uc.guc.fast_response_selftest != 1 || i915_request_completed(rq),
+ FAST_RESPONSE_TIMEOUT_MS);
+ if (ret) {
+ gt_err(gt, "Request wait failed: %pe\n", ERR_PTR(ret));
+ goto err_rq;
+ }
+
+ if (i915_request_completed(rq)) {
+ gt_err(gt, "Spinner died waiting for fast request error!\n");
+ ret = -EIO;
+ goto err_rq;
+ }
+
+ if (gt->uc.guc.fast_response_selftest != 2) {
+ gt_err(gt, "Unexpected fast response count: %d\n",
+ gt->uc.guc.fast_response_selftest);
+ ret = -EIO; /* don't report success when the handshake failed */
+ goto err_rq;
+ }
+
+ igt_spinner_end(&spin);
+ spinning = false;
+
+ ret = intel_selftest_wait_for_rq(rq);
+ if (ret) {
+ gt_err(gt, "Request failed to complete: %pe\n", ERR_PTR(ret));
+ goto err_rq;
+ }
+
+err_rq:
+ i915_request_put(rq);
+
+err_spin:
+ if (spinning)
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+
+err_pm:
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ return ret;
+}
+
int intel_guc_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(intel_guc_scrub_ctbs),
SUBTEST(intel_guc_steal_guc_ids),
+ SUBTEST(intel_guc_fast_request),
};
struct intel_gt *gt = to_gt(i915);
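intel_guc_fast_request() above leans on i915's wait_for() polling helper, which returns 0 once the condition becomes true and -ETIMEDOUT after the given number of milliseconds; roughly, and ignoring the real macro's adaptive back-off, it behaves like this sketch:

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	/* Conceptual model of wait_for(COND, ms); not the driver's macro. */
	static int wait_for_sketch(bool (*cond)(void *), void *data,
				   unsigned int timeout_ms)
	{
		unsigned int waited_ms = 0;

		while (!cond(data)) {
			if (waited_ms >= timeout_ms)
				return -ETIMEDOUT;	/* condition never met */
			msleep(1);
			waited_ms++;
		}
		return 0;				/* condition observed */
	}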
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
index 34b5d952e2bc..26fdc392fce6 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
@@ -74,7 +74,7 @@ static int intel_hang_guc(void *arg)
goto err;
}
- rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
intel_context_put(ce);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index bfe92d2402ea..db99c2ef66db 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -51,6 +51,7 @@
#include "i915_debugfs.h"
#include "i915_debugfs_params.h"
#include "i915_driver.h"
+#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "i915_scheduler.h"
@@ -299,107 +300,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
return 0;
}
-#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
-static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
- size_t count, loff_t *pos)
-{
- struct i915_gpu_coredump *error;
- ssize_t ret;
- void *buf;
-
- error = file->private_data;
- if (!error)
- return 0;
-
- /* Bounce buffer required because of kernfs __user API convenience. */
- buf = kmalloc(count, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
- if (ret <= 0)
- goto out;
-
- if (!copy_to_user(ubuf, buf, ret))
- *pos += ret;
- else
- ret = -EFAULT;
-
-out:
- kfree(buf);
- return ret;
-}
-
-static int gpu_state_release(struct inode *inode, struct file *file)
-{
- i915_gpu_coredump_put(file->private_data);
- return 0;
-}
-
-static int i915_gpu_info_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *i915 = inode->i_private;
- struct i915_gpu_coredump *gpu;
- intel_wakeref_t wakeref;
-
- gpu = NULL;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- gpu = i915_gpu_coredump(to_gt(i915), ALL_ENGINES, CORE_DUMP_FLAG_NONE);
-
- if (IS_ERR(gpu))
- return PTR_ERR(gpu);
-
- file->private_data = gpu;
- return 0;
-}
-
-static const struct file_operations i915_gpu_info_fops = {
- .owner = THIS_MODULE,
- .open = i915_gpu_info_open,
- .read = gpu_state_read,
- .llseek = default_llseek,
- .release = gpu_state_release,
-};
-
-static ssize_t
-i915_error_state_write(struct file *filp,
- const char __user *ubuf,
- size_t cnt,
- loff_t *ppos)
-{
- struct i915_gpu_coredump *error = filp->private_data;
-
- if (!error)
- return 0;
-
- drm_dbg(&error->i915->drm, "Resetting error state\n");
- i915_reset_error_state(error->i915);
-
- return cnt;
-}
-
-static int i915_error_state_open(struct inode *inode, struct file *file)
-{
- struct i915_gpu_coredump *error;
-
- error = i915_first_error_state(inode->i_private);
- if (IS_ERR(error))
- return PTR_ERR(error);
-
- file->private_data = error;
- return 0;
-}
-
-static const struct file_operations i915_error_state_fops = {
- .owner = THIS_MODULE,
- .open = i915_error_state_open,
- .read = gpu_state_read,
- .write = i915_error_state_write,
- .llseek = default_llseek,
- .release = gpu_state_release,
-};
-#endif
-
static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -839,10 +739,6 @@ static const struct i915_debugfs_files {
{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
{"i915_wedged", &i915_wedged_fops},
{"i915_gem_drop_caches", &i915_drop_caches_fops},
-#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
- {"i915_error_state", &i915_error_state_fops},
- {"i915_gpu_info", &i915_gpu_info_fops},
-#endif
};
void i915_debugfs_register(struct drm_i915_private *dev_priv)
@@ -865,4 +761,6 @@ void i915_debugfs_register(struct drm_i915_private *dev_priv)
drm_debugfs_create_files(i915_debugfs_list,
ARRAY_SIZE(i915_debugfs_list),
minor->debugfs_root, minor);
+
+ i915_gpu_error_debugfs_register(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 2a1faf403965..c7d7c3b7ecc6 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -798,7 +798,9 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_cleanup_modeset2;
- intel_pxp_init(i915);
+ ret = intel_pxp_init(i915);
+ if (ret && ret != -ENODEV)
+ drm_dbg(&i915->drm, "pxp init failed with %d\n", ret);
ret = intel_display_driver_probe(i915);
if (ret)
@@ -1033,7 +1035,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_power_domains_driver_remove(i915);
enable_rpm_wakeref_asserts(&i915->runtime_pm);
- intel_runtime_pm_driver_release(&i915->runtime_pm);
+ intel_runtime_pm_driver_last_release(&i915->runtime_pm);
}
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c
index 2a44b3876cb5..fa6852713bee 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.c
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -28,6 +28,10 @@ struct i915_drm_client *i915_drm_client_alloc(void)
kref_init(&client->kref);
spin_lock_init(&client->ctx_lock);
INIT_LIST_HEAD(&client->ctx_list);
+#ifdef CONFIG_PROC_FS
+ spin_lock_init(&client->objects_lock);
+ INIT_LIST_HEAD(&client->objects_list);
+#endif
return client;
}
@@ -41,6 +45,68 @@ void __i915_drm_client_free(struct kref *kref)
}
#ifdef CONFIG_PROC_FS
+static void
+obj_meminfo(struct drm_i915_gem_object *obj,
+ struct drm_memory_stats stats[INTEL_REGION_UNKNOWN])
+{
+ const enum intel_region_id id = obj->mm.region ?
+ obj->mm.region->id : INTEL_REGION_SMEM;
+ const u64 sz = obj->base.size;
+
+ if (obj->base.handle_count > 1)
+ stats[id].shared += sz;
+ else
+ stats[id].private += sz;
+
+ if (i915_gem_object_has_pages(obj)) {
+ stats[id].resident += sz;
+
+ if (!dma_resv_test_signaled(obj->base.resv,
+ DMA_RESV_USAGE_BOOKKEEP))
+ stats[id].active += sz;
+ else if (i915_gem_object_is_shrinkable(obj) &&
+ obj->mm.madv == I915_MADV_DONTNEED)
+ stats[id].purgeable += sz;
+ }
+}
+
+static void show_meminfo(struct drm_printer *p, struct drm_file *file)
+{
+ struct drm_memory_stats stats[INTEL_REGION_UNKNOWN] = {};
+ struct drm_i915_file_private *fpriv = file->driver_priv;
+ struct i915_drm_client *client = fpriv->client;
+ struct drm_i915_private *i915 = fpriv->i915;
+ struct drm_i915_gem_object *obj;
+ struct intel_memory_region *mr;
+ struct list_head __rcu *pos;
+ unsigned int id;
+
+ /* Public objects. */
+ spin_lock(&file->table_lock);
+ idr_for_each_entry(&file->object_idr, obj, id)
+ obj_meminfo(obj, stats);
+ spin_unlock(&file->table_lock);
+
+ /* Internal objects. */
+ rcu_read_lock();
+ list_for_each_rcu(pos, &client->objects_list) {
+ obj = i915_gem_object_get_rcu(list_entry(pos, typeof(*obj),
+ client_link));
+ if (!obj)
+ continue;
+ obj_meminfo(obj, stats);
+ i915_gem_object_put(obj);
+ }
+ rcu_read_unlock();
+
+ for_each_memory_region(mr, i915, id)
+ drm_print_memory_stats(p,
+ &stats[id],
+ DRM_GEM_OBJECT_RESIDENT |
+ DRM_GEM_OBJECT_PURGEABLE,
+ mr->uabi_name);
+}
+
static const char * const uabi_class_names[] = {
[I915_ENGINE_CLASS_RENDER] = "render",
[I915_ENGINE_CLASS_COPY] = "copy",
@@ -102,10 +168,52 @@ void i915_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file)
* ******************************************************************
*/
+ show_meminfo(p, file);
+
if (GRAPHICS_VER(i915) < 8)
return;
for (i = 0; i < ARRAY_SIZE(uabi_class_names); i++)
show_client_class(p, i915, file_priv->client, i);
}
+
+void i915_drm_client_add_object(struct i915_drm_client *client,
+ struct drm_i915_gem_object *obj)
+{
+ unsigned long flags;
+
+ GEM_WARN_ON(obj->client);
+ GEM_WARN_ON(!list_empty(&obj->client_link));
+
+ spin_lock_irqsave(&client->objects_lock, flags);
+ obj->client = i915_drm_client_get(client);
+ list_add_tail_rcu(&obj->client_link, &client->objects_list);
+ spin_unlock_irqrestore(&client->objects_lock, flags);
+}
+
+void i915_drm_client_remove_object(struct drm_i915_gem_object *obj)
+{
+ struct i915_drm_client *client = fetch_and_zero(&obj->client);
+ unsigned long flags;
+
+ /* Object may not be associated with a client. */
+ if (!client)
+ return;
+
+ spin_lock_irqsave(&client->objects_lock, flags);
+ list_del_rcu(&obj->client_link);
+ spin_unlock_irqrestore(&client->objects_lock, flags);
+
+ i915_drm_client_put(client);
+}
+
+void i915_drm_client_add_context_objects(struct i915_drm_client *client,
+ struct intel_context *ce)
+{
+ if (ce->state)
+ i915_drm_client_add_object(client, ce->state->obj);
+
+ if (ce->ring != ce->engine->legacy.ring && ce->ring->vma)
+ i915_drm_client_add_object(client, ce->ring->vma->obj);
+}
#endif
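show_meminfo() above routes the per-region totals into drm_print_memory_stats(), which emits them as drm-* key/value pairs in the DRM client's fdinfo, keyed by the region uabi_name (e.g. drm-resident-system0). A small userspace reader, assuming an i915 node at /dev/dri/card0:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		char path[64], line[256];
		int fd = open("/dev/dri/card0", O_RDWR);
		FILE *f;

		if (fd < 0)
			return 1;
		snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
		f = fopen(path, "r");
		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			if (!strncmp(line, "drm-", 4))
				fputs(line, stdout);	/* e.g. drm-total-local0: ... KiB */
		fclose(f);
		close(fd);
		return 0;
	}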
diff --git a/drivers/gpu/drm/i915/i915_drm_client.h b/drivers/gpu/drm/i915/i915_drm_client.h
index 67816c912bca..a439dd789936 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.h
+++ b/drivers/gpu/drm/i915/i915_drm_client.h
@@ -12,6 +12,10 @@
#include <uapi/drm/i915_drm.h>
+#include "i915_file_private.h"
+#include "gem/i915_gem_object_types.h"
+#include "gt/intel_context_types.h"
+
#define I915_LAST_UABI_ENGINE_CLASS I915_ENGINE_CLASS_COMPUTE
struct drm_file;
@@ -25,6 +29,20 @@ struct i915_drm_client {
spinlock_t ctx_lock; /* For add/remove from ctx_list. */
struct list_head ctx_list; /* List of contexts belonging to client. */
+#ifdef CONFIG_PROC_FS
+ /**
+ * @objects_lock: lock protecting @objects_list
+ */
+ spinlock_t objects_lock;
+
+ /**
+ * @objects_list: list of objects created by this client
+ *
+ * Protected by @objects_lock.
+ */
+ struct list_head objects_list;
+#endif
+
/**
* @past_runtime: Accumulation of pphwsp runtimes from closed contexts.
*/
@@ -49,4 +67,28 @@ struct i915_drm_client *i915_drm_client_alloc(void);
void i915_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file);
+#ifdef CONFIG_PROC_FS
+void i915_drm_client_add_object(struct i915_drm_client *client,
+ struct drm_i915_gem_object *obj);
+void i915_drm_client_remove_object(struct drm_i915_gem_object *obj);
+void i915_drm_client_add_context_objects(struct i915_drm_client *client,
+ struct intel_context *ce);
+#else
+static inline void i915_drm_client_add_object(struct i915_drm_client *client,
+ struct drm_i915_gem_object *obj)
+{
+}
+
+static inline void
+i915_drm_client_remove_object(struct drm_i915_gem_object *obj)
+{
+}
+
+static inline void
+i915_drm_client_add_context_objects(struct i915_drm_client *client,
+ struct intel_context *ce)
+{
+}
+#endif
+
#endif /* !__I915_DRM_CLIENT_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 0971f4976324..d04660b60046 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -57,6 +57,7 @@
#include "i915_memcpy.h"
#include "i915_reg.h"
#include "i915_scatterlist.h"
+#include "i915_sysfs.h"
#include "i915_utils.h"
#define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
@@ -520,7 +521,7 @@ __find_vma(struct i915_vma_coredump *vma, const char *name)
return NULL;
}
-struct i915_vma_coredump *
+static struct i915_vma_coredump *
intel_gpu_error_find_batch(const struct intel_engine_coredump *ee)
{
return __find_vma(ee->vma, "batch");
@@ -609,9 +610,9 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
va_end(args);
}
-void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
- const struct intel_engine_cs *engine,
- const struct i915_vma_coredump *vma)
+static void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
+ const struct intel_engine_cs *engine,
+ const struct i915_vma_coredump *vma)
{
char out[ASCII85_BUFSZ];
struct page *page;
@@ -2140,7 +2141,7 @@ __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 du
return error;
}
-struct i915_gpu_coredump *
+static struct i915_gpu_coredump *
i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
static DEFINE_MUTEX(capture_mutex);
@@ -2211,7 +2212,7 @@ void i915_capture_error_state(struct intel_gt *gt,
i915_gpu_coredump_put(error);
}
-struct i915_gpu_coredump *
+static struct i915_gpu_coredump *
i915_first_error_state(struct drm_i915_private *i915)
{
struct i915_gpu_coredump *error;
@@ -2378,3 +2379,184 @@ void intel_klog_error_capture(struct intel_gt *gt,
drm_info(&i915->drm, "[Capture/%d.%d] Dumped %zd bytes\n", l_count, line++, pos_err);
}
#endif
+
+static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *pos)
+{
+ struct i915_gpu_coredump *error;
+ ssize_t ret;
+ void *buf;
+
+ error = file->private_data;
+ if (!error)
+ return 0;
+
+ /* Bounce buffer required because of kernfs __user API convenience. */
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
+ if (ret <= 0)
+ goto out;
+
+ if (!copy_to_user(ubuf, buf, ret))
+ *pos += ret;
+ else
+ ret = -EFAULT;
+
+out:
+ kfree(buf);
+ return ret;
+}
+
+static int gpu_state_release(struct inode *inode, struct file *file)
+{
+ i915_gpu_coredump_put(file->private_data);
+ return 0;
+}
+
+static int i915_gpu_info_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *i915 = inode->i_private;
+ struct i915_gpu_coredump *gpu;
+ intel_wakeref_t wakeref;
+
+ gpu = NULL;
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ gpu = i915_gpu_coredump(to_gt(i915), ALL_ENGINES, CORE_DUMP_FLAG_NONE);
+
+ if (IS_ERR(gpu))
+ return PTR_ERR(gpu);
+
+ file->private_data = gpu;
+ return 0;
+}
+
+static const struct file_operations i915_gpu_info_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_gpu_info_open,
+ .read = gpu_state_read,
+ .llseek = default_llseek,
+ .release = gpu_state_release,
+};
+
+static ssize_t
+i915_error_state_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct i915_gpu_coredump *error = filp->private_data;
+
+ if (!error)
+ return 0;
+
+ drm_dbg(&error->i915->drm, "Resetting error state\n");
+ i915_reset_error_state(error->i915);
+
+ return cnt;
+}
+
+static int i915_error_state_open(struct inode *inode, struct file *file)
+{
+ struct i915_gpu_coredump *error;
+
+ error = i915_first_error_state(inode->i_private);
+ if (IS_ERR(error))
+ return PTR_ERR(error);
+
+ file->private_data = error;
+ return 0;
+}
+
+static const struct file_operations i915_error_state_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_error_state_open,
+ .read = gpu_state_read,
+ .write = i915_error_state_write,
+ .llseek = default_llseek,
+ .release = gpu_state_release,
+};
+
+void i915_gpu_error_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+
+ debugfs_create_file("i915_error_state", 0644, minor->debugfs_root, i915,
+ &i915_error_state_fops);
+ debugfs_create_file("i915_gpu_info", 0644, minor->debugfs_root, i915,
+ &i915_gpu_info_fops);
+}
+
+static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *kdev = kobj_to_dev(kobj);
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+ struct i915_gpu_coredump *gpu;
+ ssize_t ret = 0;
+
+ /*
+ * FIXME: Concurrent clients triggering resets and reading + clearing
+ * dumps can cause inconsistent sysfs reads when a user calls in with a
+ * non-zero offset to complete a prior partial read but the
+ * gpu_coredump has been cleared or replaced.
+ */
+
+ gpu = i915_first_error_state(i915);
+ if (IS_ERR(gpu)) {
+ ret = PTR_ERR(gpu);
+ } else if (gpu) {
+ ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count);
+ i915_gpu_coredump_put(gpu);
+ } else {
+ const char *str = "No error state collected\n";
+ size_t len = strlen(str);
+
+ if (off < len) {
+ ret = min_t(size_t, count, len - off);
+ memcpy(buf, str + off, ret);
+ }
+ }
+
+ return ret;
+}
+
+static ssize_t error_state_write(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *kdev = kobj_to_dev(kobj);
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+
+ drm_dbg(&dev_priv->drm, "Resetting error state\n");
+ i915_reset_error_state(dev_priv);
+
+ return count;
+}
+
+static const struct bin_attribute error_state_attr = {
+ .attr.name = "error",
+ .attr.mode = S_IRUSR | S_IWUSR,
+ .size = 0,
+ .read = error_state_read,
+ .write = error_state_write,
+};
+
+void i915_gpu_error_sysfs_setup(struct drm_i915_private *i915)
+{
+ struct device *kdev = i915->drm.primary->kdev;
+
+ if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
+ drm_err(&i915->drm, "error_state sysfs setup failed\n");
+}
+
+void i915_gpu_error_sysfs_teardown(struct drm_i915_private *i915)
+{
+ struct device *kdev = i915->drm.primary->kdev;
+
+ sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
+}
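The relocated handlers preserve the existing user-facing contract: reading the card's sysfs `error` node streams the first captured GPU coredump (or the "No error state collected" placeholder) and any write clears it, while the debugfs files keep their old `i915_error_state`/`i915_gpu_info` names. A hedged userspace sketch, assuming card0 and an error-capture-enabled kernel:

	#include <stdio.h>

	int main(void)
	{
		char buf[4096];
		size_t n;
		FILE *f = fopen("/sys/class/drm/card0/error", "r");

		if (!f)
			return 1;
		while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
			fwrite(buf, 1, n, stdout);	/* dump the coredump text */
		fclose(f);

		f = fopen("/sys/class/drm/card0/error", "w");
		if (f) {
			fputs("1", f);	/* any write resets the error state */
			fclose(f);
		}
		return 0;
	}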
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 4ce227f7e1e1..7c255bb1c319 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -17,6 +17,7 @@
#include "display/intel_display_device.h"
#include "display/intel_display_params.h"
#include "gt/intel_engine.h"
+#include "gt/intel_engine_types.h"
#include "gt/intel_gt_types.h"
#include "gt/uc/intel_uc_fw.h"
@@ -234,7 +235,7 @@ struct i915_gpu_error {
atomic_t reset_count;
/** Number of times an engine has been reset */
- atomic_t reset_engine_count[I915_NUM_ENGINES];
+ atomic_t reset_engine_count[MAX_ENGINE_CLASS];
};
struct drm_i915_error_state_buf {
@@ -257,7 +258,14 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
const struct intel_engine_cs *engine)
{
- return atomic_read(&error->reset_engine_count[engine->uabi_class]);
+ return atomic_read(&error->reset_engine_count[engine->class]);
+}
+
+static inline void
+i915_increase_reset_engine_count(struct i915_gpu_error *error,
+ const struct intel_engine_cs *engine)
+{
+ atomic_inc(&error->reset_engine_count[engine->class]);
}
#define CORE_DUMP_FLAG_NONE 0x0
@@ -277,14 +285,7 @@ static inline void intel_klog_error_capture(struct intel_gt *gt,
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
-void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
- const struct intel_engine_cs *engine,
- const struct i915_vma_coredump *vma);
-struct i915_vma_coredump *
-intel_gpu_error_find_batch(const struct intel_engine_coredump *ee);
-
-struct i915_gpu_coredump *i915_gpu_coredump(struct intel_gt *gt,
- intel_engine_mask_t engine_mask, u32 dump_flags);
+
void i915_capture_error_state(struct intel_gt *gt,
intel_engine_mask_t engine_mask, u32 dump_flags);
@@ -332,10 +333,13 @@ static inline void i915_gpu_coredump_put(struct i915_gpu_coredump *gpu)
kref_put(&gpu->ref, __i915_gpu_coredump_free);
}
-struct i915_gpu_coredump *i915_first_error_state(struct drm_i915_private *i915);
void i915_reset_error_state(struct drm_i915_private *i915);
void i915_disable_error_state(struct drm_i915_private *i915, int err);
+void i915_gpu_error_debugfs_register(struct drm_i915_private *i915);
+void i915_gpu_error_sysfs_setup(struct drm_i915_private *i915);
+void i915_gpu_error_sysfs_teardown(struct drm_i915_private *i915);
+
#else
__printf(2, 3)
@@ -403,12 +407,6 @@ static inline void i915_gpu_coredump_put(struct i915_gpu_coredump *gpu)
{
}
-static inline struct i915_gpu_coredump *
-i915_first_error_state(struct drm_i915_private *i915)
-{
- return ERR_PTR(-ENODEV);
-}
-
static inline void i915_reset_error_state(struct drm_i915_private *i915)
{
}
@@ -418,6 +416,18 @@ static inline void i915_disable_error_state(struct drm_i915_private *i915,
{
}
+static inline void i915_gpu_error_debugfs_register(struct drm_i915_private *i915)
+{
+}
+
+static inline void i915_gpu_error_sysfs_setup(struct drm_i915_private *i915)
+{
+}
+
+static inline void i915_gpu_error_sysfs_teardown(struct drm_i915_private *i915)
+{
+}
+
#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */
#endif /* _I915_GPU_ERROR_H_ */
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index f861863eb7c1..21eb0c5b320d 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -31,6 +31,16 @@
static cpumask_t i915_pmu_cpumask;
static unsigned int i915_pmu_target_cpu = -1;
+static struct i915_pmu *event_to_pmu(struct perf_event *event)
+{
+ return container_of(event->pmu, struct i915_pmu, base);
+}
+
+static struct drm_i915_private *pmu_to_i915(struct i915_pmu *pmu)
+{
+ return container_of(pmu, struct drm_i915_private, pmu);
+}
+
static u8 engine_config_sample(u64 config)
{
return config & I915_PMU_SAMPLE_MASK;
@@ -141,7 +151,7 @@ static u32 frequency_enabled_mask(void)
static bool pmu_needs_timer(struct i915_pmu *pmu)
{
- struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
u32 enable;
/*
@@ -213,19 +223,19 @@ static u64 get_rc6(struct intel_gt *gt)
struct drm_i915_private *i915 = gt->i915;
const unsigned int gt_id = gt->info.id;
struct i915_pmu *pmu = &i915->pmu;
+ intel_wakeref_t wakeref;
unsigned long flags;
- bool awake = false;
u64 val;
- if (intel_gt_pm_get_if_awake(gt)) {
+ wakeref = intel_gt_pm_get_if_awake(gt);
+ if (wakeref) {
val = __get_rc6(gt);
- intel_gt_pm_put_async(gt);
- awake = true;
+ intel_gt_pm_put_async(gt, wakeref);
}
spin_lock_irqsave(&pmu->lock, flags);
- if (awake) {
+ if (wakeref) {
store_sample(pmu, gt_id, __I915_SAMPLE_RC6, val);
} else {
/*
@@ -251,7 +261,7 @@ static u64 get_rc6(struct intel_gt *gt)
static void init_rc6(struct i915_pmu *pmu)
{
- struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
struct intel_gt *gt;
unsigned int i;
@@ -429,12 +439,14 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
const unsigned int gt_id = gt->info.id;
struct i915_pmu *pmu = &i915->pmu;
struct intel_rps *rps = &gt->rps;
+ intel_wakeref_t wakeref;
if (!frequency_sampling_enabled(pmu, gt_id))
return;
/* Report 0/0 (actual/requested) frequency while parked. */
- if (!intel_gt_pm_get_if_awake(gt))
+ wakeref = intel_gt_pm_get_if_awake(gt);
+ if (!wakeref)
return;
if (pmu->enable & config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt_id))) {
@@ -463,14 +475,13 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
period_ns / 1000);
}
- intel_gt_pm_put_async(gt);
+ intel_gt_pm_put_async(gt, wakeref);
}
static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
- struct drm_i915_private *i915 =
- container_of(hrtimer, struct drm_i915_private, pmu.timer);
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = container_of(hrtimer, struct i915_pmu, timer);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
unsigned int period_ns;
struct intel_gt *gt;
unsigned int i;
@@ -505,8 +516,8 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
static void i915_pmu_event_destroy(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
drm_WARN_ON(&i915->drm, event->parent);
@@ -572,8 +583,8 @@ config_status(struct drm_i915_private *i915, u64 config)
static int engine_event_init(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
struct intel_engine_cs *engine;
engine = intel_engine_lookup_user(i915, engine_event_class(event),
@@ -586,9 +597,8 @@ static int engine_event_init(struct perf_event *event)
static int i915_pmu_event_init(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
int ret;
if (pmu->closed)
@@ -628,9 +638,8 @@ static int i915_pmu_event_init(struct perf_event *event)
static u64 __i915_pmu_event_read(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
u64 val = 0;
if (is_engine_event(event)) {
@@ -686,10 +695,8 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
static void i915_pmu_event_read(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
+ struct i915_pmu *pmu = event_to_pmu(event);
struct hw_perf_event *hwc = &event->hw;
- struct i915_pmu *pmu = &i915->pmu;
u64 prev, new;
if (pmu->closed) {
@@ -707,10 +714,9 @@ static void i915_pmu_event_read(struct perf_event *event)
static void i915_pmu_enable(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
const unsigned int bit = event_bit(event);
- struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
if (bit == -1)
@@ -771,10 +777,9 @@ update:
static void i915_pmu_disable(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
const unsigned int bit = event_bit(event);
- struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
if (bit == -1)
@@ -818,9 +823,7 @@ static void i915_pmu_disable(struct perf_event *event)
static void i915_pmu_event_start(struct perf_event *event, int flags)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = event_to_pmu(event);
if (pmu->closed)
return;
@@ -848,9 +851,7 @@ out:
static int i915_pmu_event_add(struct perf_event *event, int flags)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = event_to_pmu(event);
if (pmu->closed)
return -ENODEV;
@@ -982,7 +983,7 @@ add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
static struct attribute **
create_event_attributes(struct i915_pmu *pmu)
{
- struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
static const struct {
unsigned int counter;
const char *name;
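The new event_to_pmu()/pmu_to_i915() helpers are thin container_of() wrappers that replace the repeated open-coded casts throughout this file; the underlying pointer arithmetic is easy to demonstrate standalone (illustration only, not driver code):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct pmu { int dummy; };
	struct device { int id; struct pmu pmu; };

	int main(void)
	{
		struct device dev = { .id = 42 };
		struct pmu *p = &dev.pmu;
		/* Recover the enclosing structure from the embedded member. */
		struct device *back = container_of(p, struct device, pmu);

		printf("%d\n", back->id);	/* prints 42 */
		return 0;
	}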
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index e88bb4f04305..613decd47760 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -155,81 +155,6 @@ static const struct bin_attribute dpf_attrs_1 = {
.private = (void *)1
};
-#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
-
-static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
-
- struct device *kdev = kobj_to_dev(kobj);
- struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct i915_gpu_coredump *gpu;
- ssize_t ret = 0;
-
- /*
- * FIXME: Concurrent clients triggering resets and reading + clearing
- * dumps can cause inconsistent sysfs reads when a user calls in with a
- * non-zero offset to complete a prior partial read but the
- * gpu_coredump has been cleared or replaced.
- */
-
- gpu = i915_first_error_state(i915);
- if (IS_ERR(gpu)) {
- ret = PTR_ERR(gpu);
- } else if (gpu) {
- ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count);
- i915_gpu_coredump_put(gpu);
- } else {
- const char *str = "No error state collected\n";
- size_t len = strlen(str);
-
- if (off < len) {
- ret = min_t(size_t, count, len - off);
- memcpy(buf, str + off, ret);
- }
- }
-
- return ret;
-}
-
-static ssize_t error_state_write(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- struct device *kdev = kobj_to_dev(kobj);
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-
- drm_dbg(&dev_priv->drm, "Resetting error state\n");
- i915_reset_error_state(dev_priv);
-
- return count;
-}
-
-static const struct bin_attribute error_state_attr = {
- .attr.name = "error",
- .attr.mode = S_IRUSR | S_IWUSR,
- .size = 0,
- .read = error_state_read,
- .write = error_state_write,
-};
-
-static void i915_setup_error_capture(struct device *kdev)
-{
- if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
- drm_err(&kdev_minor_to_i915(kdev)->drm,
- "error_state sysfs setup failed\n");
-}
-
-static void i915_teardown_error_capture(struct device *kdev)
-{
- sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
-}
-#else
-static void i915_setup_error_capture(struct device *kdev) {}
-static void i915_teardown_error_capture(struct device *kdev) {}
-#endif
-
void i915_setup_sysfs(struct drm_i915_private *dev_priv)
{
struct device *kdev = dev_priv->drm.primary->kdev;
@@ -255,7 +180,7 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
drm_warn(&dev_priv->drm,
"failed to register GT sysfs directory\n");
- i915_setup_error_capture(kdev);
+ i915_gpu_error_sysfs_setup(dev_priv);
intel_engines_add_sysfs(dev_priv);
}
@@ -264,7 +189,7 @@ void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
{
struct device *kdev = dev_priv->drm.primary->kdev;
- i915_teardown_error_capture(kdev);
+ i915_gpu_error_sysfs_teardown(dev_priv);
device_remove_bin_file(kdev, &dpf_attrs_1);
device_remove_bin_file(kdev, &dpf_attrs);
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index 3d1fdea9811d..60a03340bbd4 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -216,6 +216,22 @@ static int intel_memory_region_memtest(struct intel_memory_region *mem,
return err;
}
+static const char *region_type_str(u16 type)
+{
+ switch (type) {
+ case INTEL_MEMORY_SYSTEM:
+ return "system";
+ case INTEL_MEMORY_LOCAL:
+ return "local";
+ case INTEL_MEMORY_STOLEN_LOCAL:
+ return "stolen-local";
+ case INTEL_MEMORY_STOLEN_SYSTEM:
+ return "stolen-system";
+ default:
+ return "unknown";
+ }
+}
+
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
resource_size_t start,
@@ -244,6 +260,9 @@ intel_memory_region_create(struct drm_i915_private *i915,
mem->type = type;
mem->instance = instance;
+ snprintf(mem->uabi_name, sizeof(mem->uabi_name), "%s%u",
+ region_type_str(type), instance);
+
mutex_init(&mem->objects.lock);
INIT_LIST_HEAD(&mem->objects.list);
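region_type_str() plus the instance number yields the stable region names ("system0", "local0", "stolen-local0", ...) that the fdinfo memory stats above key on; the formatting is just:

	char uabi_name[16];

	/* e.g. INTEL_MEMORY_LOCAL, instance 0 -> "local0" */
	snprintf(uabi_name, sizeof(uabi_name), "%s%u", "local", 0);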
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 2953ed5c3248..9ba36454e51b 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -80,6 +80,7 @@ struct intel_memory_region {
u16 instance;
enum intel_region_id id;
char name[16];
+ char uabi_name[16];
bool private; /* not for userspace */
struct {
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 8743153fad87..860b51b56a92 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -50,184 +50,44 @@
* present for a given platform.
*/
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-
-#include <linux/sort.h>
-
-#define STACKDEPTH 8
-
-static noinline depot_stack_handle_t __save_depot_stack(void)
+static struct drm_i915_private *rpm_to_i915(struct intel_runtime_pm *rpm)
{
- unsigned long entries[STACKDEPTH];
- unsigned int n;
-
- n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
- return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
+ return container_of(rpm, struct drm_i915_private, runtime_pm);
}
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
- spin_lock_init(&rpm->debug.lock);
- stack_depot_init();
+ ref_tracker_dir_init(&rpm->debug, INTEL_REFTRACK_DEAD_COUNT, dev_name(rpm->kdev));
}
-static noinline depot_stack_handle_t
+static intel_wakeref_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
- depot_stack_handle_t stack, *stacks;
- unsigned long flags;
-
- if (rpm->no_wakeref_tracking)
+ if (!rpm->available || rpm->no_wakeref_tracking)
return -1;
- stack = __save_depot_stack();
- if (!stack)
- return -1;
-
- spin_lock_irqsave(&rpm->debug.lock, flags);
-
- if (!rpm->debug.count)
- rpm->debug.last_acquire = stack;
-
- stacks = krealloc(rpm->debug.owners,
- (rpm->debug.count + 1) * sizeof(*stacks),
- GFP_NOWAIT | __GFP_NOWARN);
- if (stacks) {
- stacks[rpm->debug.count++] = stack;
- rpm->debug.owners = stacks;
- } else {
- stack = -1;
- }
-
- spin_unlock_irqrestore(&rpm->debug.lock, flags);
-
- return stack;
+ return intel_ref_tracker_alloc(&rpm->debug);
}
static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
- depot_stack_handle_t stack)
+ intel_wakeref_t wakeref)
{
- struct drm_i915_private *i915 = container_of(rpm,
- struct drm_i915_private,
- runtime_pm);
- unsigned long flags, n;
- bool found = false;
-
- if (unlikely(stack == -1))
+ if (!rpm->available || rpm->no_wakeref_tracking)
return;
- spin_lock_irqsave(&rpm->debug.lock, flags);
- for (n = rpm->debug.count; n--; ) {
- if (rpm->debug.owners[n] == stack) {
- memmove(rpm->debug.owners + n,
- rpm->debug.owners + n + 1,
- (--rpm->debug.count - n) * sizeof(stack));
- found = true;
- break;
- }
- }
- spin_unlock_irqrestore(&rpm->debug.lock, flags);
-
- if (drm_WARN(&i915->drm, !found,
- "Unmatched wakeref (tracking %lu), count %u\n",
- rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
- char *buf;
-
- buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
- if (!buf)
- return;
-
- stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
- DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);
-
- stack = READ_ONCE(rpm->debug.last_release);
- if (stack) {
- stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
- DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
- }
-
- kfree(buf);
- }
+ intel_ref_tracker_free(&rpm->debug, wakeref);
}
-static int cmphandle(const void *_a, const void *_b)
+static void untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
- const depot_stack_handle_t * const a = _a, * const b = _b;
-
- if (*a < *b)
- return -1;
- else if (*a > *b)
- return 1;
- else
- return 0;
-}
-
-static void
-__print_intel_runtime_pm_wakeref(struct drm_printer *p,
- const struct intel_runtime_pm_debug *dbg)
-{
- unsigned long i;
- char *buf;
-
- buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
- if (!buf)
- return;
-
- if (dbg->last_acquire) {
- stack_depot_snprint(dbg->last_acquire, buf, PAGE_SIZE, 2);
- drm_printf(p, "Wakeref last acquired:\n%s", buf);
- }
-
- if (dbg->last_release) {
- stack_depot_snprint(dbg->last_release, buf, PAGE_SIZE, 2);
- drm_printf(p, "Wakeref last released:\n%s", buf);
- }
-
- drm_printf(p, "Wakeref count: %lu\n", dbg->count);
-
- sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);
-
- for (i = 0; i < dbg->count; i++) {
- depot_stack_handle_t stack = dbg->owners[i];
- unsigned long rep;
-
- rep = 1;
- while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
- rep++, i++;
- stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
- drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
- }
-
- kfree(buf);
-}
-
-static noinline void
-__untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
- struct intel_runtime_pm_debug *saved)
-{
- *saved = *debug;
-
- debug->owners = NULL;
- debug->count = 0;
- debug->last_release = __save_depot_stack();
-}
-
-static void
-dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
-{
- if (debug->count) {
- struct drm_printer p = drm_debug_printer("i915");
-
- __print_intel_runtime_pm_wakeref(&p, debug);
- }
-
- kfree(debug->owners);
+ ref_tracker_dir_exit(&rpm->debug);
}
static noinline void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
- struct intel_runtime_pm_debug dbg = {};
unsigned long flags;
if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
@@ -235,60 +95,14 @@ __intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
flags))
return;
- __untrack_all_wakerefs(&rpm->debug, &dbg);
+ ref_tracker_dir_print_locked(&rpm->debug, INTEL_REFTRACK_PRINT_LIMIT);
spin_unlock_irqrestore(&rpm->debug.lock, flags);
-
- dump_and_free_wakeref_tracking(&dbg);
-}
-
-static noinline void
-untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
-{
- struct intel_runtime_pm_debug dbg = {};
- unsigned long flags;
-
- spin_lock_irqsave(&rpm->debug.lock, flags);
- __untrack_all_wakerefs(&rpm->debug, &dbg);
- spin_unlock_irqrestore(&rpm->debug.lock, flags);
-
- dump_and_free_wakeref_tracking(&dbg);
}
void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
struct drm_printer *p)
{
- struct intel_runtime_pm_debug dbg = {};
-
- do {
- unsigned long alloc = dbg.count;
- depot_stack_handle_t *s;
-
- spin_lock_irq(&rpm->debug.lock);
- dbg.count = rpm->debug.count;
- if (dbg.count <= alloc) {
- memcpy(dbg.owners,
- rpm->debug.owners,
- dbg.count * sizeof(*s));
- }
- dbg.last_acquire = rpm->debug.last_acquire;
- dbg.last_release = rpm->debug.last_release;
- spin_unlock_irq(&rpm->debug.lock);
- if (dbg.count <= alloc)
- break;
-
- s = krealloc(dbg.owners,
- dbg.count * sizeof(*s),
- GFP_NOWAIT | __GFP_NOWARN);
- if (!s)
- goto out;
-
- dbg.owners = s;
- } while (1);
-
- __print_intel_runtime_pm_wakeref(p, &dbg);
-
-out:
- kfree(dbg.owners);
+ intel_ref_tracker_show(&rpm->debug, p);
}
#else
@@ -297,14 +111,14 @@ static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
}
-static depot_stack_handle_t
+static intel_wakeref_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
return -1;
}
static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
- intel_wakeref_t wref)
+ intel_wakeref_t wakeref)
{
}
@@ -349,9 +163,7 @@ intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock)
static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
bool wakelock)
{
- struct drm_i915_private *i915 = container_of(rpm,
- struct drm_i915_private,
- runtime_pm);
+ struct drm_i915_private *i915 = rpm_to_i915(rpm);
int ret;
ret = pm_runtime_get_sync(rpm->kdev);
@@ -556,9 +368,7 @@ void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
*/
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
{
- struct drm_i915_private *i915 = container_of(rpm,
- struct drm_i915_private,
- runtime_pm);
+ struct drm_i915_private *i915 = rpm_to_i915(rpm);
struct device *kdev = rpm->kdev;
/*
@@ -611,9 +421,7 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
{
- struct drm_i915_private *i915 = container_of(rpm,
- struct drm_i915_private,
- runtime_pm);
+ struct drm_i915_private *i915 = rpm_to_i915(rpm);
struct device *kdev = rpm->kdev;
/* Transfer rpm ownership back to core */
@@ -628,9 +436,7 @@ void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
{
- struct drm_i915_private *i915 = container_of(rpm,
- struct drm_i915_private,
- runtime_pm);
+ struct drm_i915_private *i915 = rpm_to_i915(rpm);
int count = atomic_read(&rpm->wakeref_count);
intel_wakeref_auto_fini(&rpm->userfault_wakeref);
@@ -639,14 +445,17 @@ void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
"i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
intel_rpm_raw_wakeref_count(count),
intel_rpm_wakelock_count(count));
+}
+void intel_runtime_pm_driver_last_release(struct intel_runtime_pm *rpm)
+{
+ intel_runtime_pm_driver_release(rpm);
untrack_all_intel_runtime_pm_wakerefs(rpm);
}
void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
{
- struct drm_i915_private *i915 =
- container_of(rpm, struct drm_i915_private, runtime_pm);
+ struct drm_i915_private *i915 = rpm_to_i915(rpm);
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct device *kdev = &pdev->dev;
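All of the deleted stack-depot bookkeeping above is replaced by the generic ref_tracker facility from <linux/ref_tracker.h>; its lifecycle, roughly as the new code uses it (a sketch, with an arbitrary quarantine depth of 16):

	#include <linux/ref_tracker.h>

	struct ref_tracker_dir dir;
	struct ref_tracker *tracker = NULL;

	ref_tracker_dir_init(&dir, 16, "example");	/* one dir per object */

	ref_tracker_alloc(&dir, &tracker, GFP_NOWAIT);	/* take a reference */
	/* ... use the protected resource ... */
	ref_tracker_free(&dir, &tracker);		/* drop it again */

	ref_tracker_dir_exit(&dir);	/* reports any still-allocated trackers */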
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index be43614c73fd..de3579d399e1 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -75,15 +75,7 @@ struct intel_runtime_pm {
* paired rpm_put) we can remove the corresponding pair and keep
* the array trimmed to active wakerefs.
*/
- struct intel_runtime_pm_debug {
- spinlock_t lock;
-
- depot_stack_handle_t last_acquire;
- depot_stack_handle_t last_release;
-
- depot_stack_handle_t *owners;
- unsigned long count;
- } debug;
+ struct ref_tracker_dir debug;
#endif
};
@@ -187,6 +179,7 @@ void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm);
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm);
+void intel_runtime_pm_driver_last_release(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index 623a69089386..dea2f63184f8 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -99,7 +99,8 @@ static void __intel_wakeref_put_work(struct work_struct *wrk)
void __intel_wakeref_init(struct intel_wakeref *wf,
struct drm_i915_private *i915,
const struct intel_wakeref_ops *ops,
- struct intel_wakeref_lockclass *key)
+ struct intel_wakeref_lockclass *key,
+ const char *name)
{
wf->i915 = i915;
wf->ops = ops;
@@ -111,6 +112,10 @@ void __intel_wakeref_init(struct intel_wakeref *wf,
INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work);
lockdep_init_map(&wf->work.work.lockdep_map,
"wakeref.work", &key->work, 0);
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF)
+ ref_tracker_dir_init(&wf->debug, INTEL_REFTRACK_DEAD_COUNT, name);
+#endif
}
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
@@ -191,3 +196,31 @@ void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
intel_wakeref_auto(wf, 0);
INTEL_WAKEREF_BUG_ON(wf->wakeref);
}
+
+void intel_ref_tracker_show(struct ref_tracker_dir *dir,
+ struct drm_printer *p)
+{
+ const size_t buf_size = PAGE_SIZE;
+ char *buf, *sb, *se;
+ size_t count;
+
+ buf = kmalloc(buf_size, GFP_NOWAIT);
+ if (!buf)
+ return;
+
+ count = ref_tracker_dir_snprint(dir, buf, buf_size);
+ if (!count)
+ goto free;
+ /* printk does not like big buffers, so we split it */
+ for (sb = buf; *sb; sb = se + 1) {
+ se = strchrnul(sb, '\n');
+ drm_printf(p, "%.*s", (int)(se - sb + 1), sb);
+ if (!*se)
+ break;
+ }
+ if (count >= buf_size)
+ drm_printf(p, "\n...dropped %zd extra bytes of leak report.\n",
+ count + 1 - buf_size);
+free:
+ kfree(buf);
+}
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index ec881b097368..68aa3be48251 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -7,16 +7,25 @@
#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H
+#include <drm/drm_print.h>
+
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
+#include <linux/ref_tracker.h>
+#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+typedef unsigned long intel_wakeref_t;
+
+#define INTEL_REFTRACK_DEAD_COUNT 16
+#define INTEL_REFTRACK_PRINT_LIMIT 16
+
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
#else
@@ -26,8 +35,6 @@
struct intel_runtime_pm;
struct intel_wakeref;
-typedef depot_stack_handle_t intel_wakeref_t;
-
struct intel_wakeref_ops {
int (*get)(struct intel_wakeref *wf);
int (*put)(struct intel_wakeref *wf);
@@ -43,6 +50,10 @@ struct intel_wakeref {
const struct intel_wakeref_ops *ops;
struct delayed_work work;
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF)
+ struct ref_tracker_dir debug;
+#endif
};
struct intel_wakeref_lockclass {
@@ -53,11 +64,12 @@ struct intel_wakeref_lockclass {
void __intel_wakeref_init(struct intel_wakeref *wf,
struct drm_i915_private *i915,
const struct intel_wakeref_ops *ops,
- struct intel_wakeref_lockclass *key);
-#define intel_wakeref_init(wf, i915, ops) do { \
+ struct intel_wakeref_lockclass *key,
+ const char *name);
+#define intel_wakeref_init(wf, i915, ops, name) do { \
static struct intel_wakeref_lockclass __key; \
\
- __intel_wakeref_init((wf), (i915), (ops), &__key); \
+ __intel_wakeref_init((wf), (i915), (ops), &__key, name); \
} while (0)
int __intel_wakeref_get_first(struct intel_wakeref *wf);
@@ -261,6 +273,57 @@ __intel_wakeref_defer_park(struct intel_wakeref *wf)
*/
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);
+#define INTEL_WAKEREF_DEF ((intel_wakeref_t)(-1))
+
+static inline intel_wakeref_t intel_ref_tracker_alloc(struct ref_tracker_dir *dir)
+{
+ struct ref_tracker *user = NULL;
+
+ ref_tracker_alloc(dir, &user, GFP_NOWAIT);
+
+ return (intel_wakeref_t)user ?: INTEL_WAKEREF_DEF;
+}
+
+static inline void intel_ref_tracker_free(struct ref_tracker_dir *dir,
+ intel_wakeref_t handle)
+{
+ struct ref_tracker *user;
+
+ user = (handle == INTEL_WAKEREF_DEF) ? NULL : (void *)handle;
+
+ ref_tracker_free(dir, &user);
+}
+
+void intel_ref_tracker_show(struct ref_tracker_dir *dir,
+ struct drm_printer *p);
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF)
+
+static inline intel_wakeref_t intel_wakeref_track(struct intel_wakeref *wf)
+{
+ return intel_ref_tracker_alloc(&wf->debug);
+}
+
+static inline void intel_wakeref_untrack(struct intel_wakeref *wf,
+ intel_wakeref_t handle)
+{
+ intel_ref_tracker_free(&wf->debug, handle);
+}
+
+#else
+
+static inline intel_wakeref_t intel_wakeref_track(struct intel_wakeref *wf)
+{
+ return -1;
+}
+
+static inline void intel_wakeref_untrack(struct intel_wakeref *wf,
+ intel_wakeref_t handle)
+{
+}
+
+#endif
+
struct intel_wakeref_auto {
struct drm_i915_private *i915;
struct timer_list timer;
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
index dc327cf40b5a..75278e78ca90 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -199,6 +199,9 @@ int intel_pxp_init(struct drm_i915_private *i915)
struct intel_gt *gt;
bool is_full_feature = false;
+ if (intel_gt_is_wedged(to_gt(i915)))
+ return -ENOTCONN;
+
/*
* NOTE: Get the ctrl_gt before checking intel_pxp_is_supported since
* we still need it if PXP's backend tee transport is needed.
@@ -303,6 +306,8 @@ static int __pxp_global_teardown_final(struct intel_pxp *pxp)
if (!pxp->arb_is_valid)
return 0;
+
+ drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: teardown for suspend/fini");
/*
* To ensure synchronous and coherent session teardown completion
* in response to suspend or shutdown triggers, don't use a worker.
@@ -324,6 +329,8 @@ static int __pxp_global_teardown_restart(struct intel_pxp *pxp)
if (pxp->arb_is_valid)
return 0;
+
+ drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: teardown for restart");
/*
* The arb-session is currently inactive and we are doing a reset and restart
* due to a runtime event. Use the worker that was designed for this.
@@ -332,8 +339,11 @@ static int __pxp_global_teardown_restart(struct intel_pxp *pxp)
timeout = intel_pxp_get_backend_timeout_ms(pxp);
- if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout)))
+ if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout))) {
+ drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: restart backend timed out (%d ms)",
+ timeout);
return -ETIMEDOUT;
+ }
return 0;
}
@@ -414,10 +424,12 @@ int intel_pxp_start(struct intel_pxp *pxp)
int ret = 0;
ret = intel_pxp_get_readiness_status(pxp, PXP_READINESS_TIMEOUT);
- if (ret < 0)
+ if (ret < 0) {
+ drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: tried but not-avail (%d)", ret);
return ret;
- else if (ret > 1)
+ } else if (ret > 1) {
return -EIO; /* per UAPI spec, user may retry later */
+ }
mutex_lock(&pxp->arb_mutex);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
index 91e9622c07d0..d81750b9bdda 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
@@ -40,11 +40,12 @@ void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir)
GEN12_DISPLAY_APP_TERMINATED_PER_FW_REQ_INTERRUPT)) {
/* immediately mark PXP as inactive on termination */
intel_pxp_mark_termination_in_progress(pxp);
- pxp->session_events |= PXP_TERMINATION_REQUEST | PXP_INVAL_REQUIRED;
+ pxp->session_events |= PXP_TERMINATION_REQUEST | PXP_INVAL_REQUIRED |
+ PXP_EVENT_TYPE_IRQ;
}
if (iir & GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT)
- pxp->session_events |= PXP_TERMINATION_COMPLETE;
+ pxp->session_events |= PXP_TERMINATION_COMPLETE | PXP_EVENT_TYPE_IRQ;
if (pxp->session_events)
queue_work(system_unbound_wq, &pxp->session_work);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
index 0a3e66b0265e..091c86e03d1a 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
@@ -137,8 +137,10 @@ void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_res
static void pxp_terminate_complete(struct intel_pxp *pxp)
{
/* Re-create the arb session after teardown handle complete */
- if (fetch_and_zero(&pxp->hw_state_invalidated))
+ if (fetch_and_zero(&pxp->hw_state_invalidated)) {
+ drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: creating arb_session after invalidation");
pxp_create_arb_session(pxp);
+ }
complete_all(&pxp->termination);
}
@@ -157,6 +159,8 @@ static void pxp_session_work(struct work_struct *work)
if (!events)
return;
+ drm_dbg(&gt->i915->drm, "PXP: processing event-flags 0x%08x", events);
+
if (events & PXP_INVAL_REQUIRED)
intel_pxp_invalidate(pxp);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
index 7e11fa8034b2..07864b584cf4 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
@@ -124,6 +124,7 @@ struct intel_pxp {
#define PXP_TERMINATION_REQUEST BIT(0)
#define PXP_TERMINATION_COMPLETE BIT(1)
#define PXP_INVAL_REQUIRED BIT(2)
+#define PXP_EVENT_TYPE_IRQ BIT(3)
};
#endif /* __INTEL_PXP_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/i915_syncmap.c b/drivers/gpu/drm/i915/selftests/i915_syncmap.c
index 47f4ae18a1ef..88fa845e9f4a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_syncmap.c
+++ b/drivers/gpu/drm/i915/selftests/i915_syncmap.c
@@ -77,7 +77,7 @@ __sync_print(struct i915_syncmap *p,
for_each_set_bit(i, (unsigned long *)&p->bitmap, KSYNCMAP) {
buf = __sync_print(__sync_child(p)[i], buf, sz,
depth + 1,
- last << 1 | !!(p->bitmap >> (i + 1)),
+ last << 1 | ((p->bitmap >> (i + 1)) ? 1 : 0),
i);
}
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c
index 4ddc6d902752..7d41874a49c5 100644
--- a/drivers/gpu/drm/i915/selftests/igt_live_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c
@@ -37,8 +37,9 @@ int igt_live_test_begin(struct igt_live_test *t,
}
for_each_engine(engine, gt, id)
- t->reset_engine[id] =
- i915_reset_engine_count(&i915->gpu_error, engine);
+ t->reset_engine[i][id] =
+ i915_reset_engine_count(&i915->gpu_error,
+ engine);
}
t->reset_global = i915_reset_count(&i915->gpu_error);
@@ -66,14 +67,14 @@ int igt_live_test_end(struct igt_live_test *t)
for_each_gt(gt, i915, i) {
for_each_engine(engine, gt, id) {
- if (t->reset_engine[id] ==
+ if (t->reset_engine[i][id] ==
i915_reset_engine_count(&i915->gpu_error, engine))
continue;
gt_err(gt, "%s(%s): engine '%s' was reset %d times!\n",
t->func, t->name, engine->name,
i915_reset_engine_count(&i915->gpu_error, engine) -
- t->reset_engine[id]);
+ t->reset_engine[i][id]);
return -EIO;
}
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.h b/drivers/gpu/drm/i915/selftests/igt_live_test.h
index 36ed42736c52..83e3ad430922 100644
--- a/drivers/gpu/drm/i915/selftests/igt_live_test.h
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.h
@@ -7,6 +7,7 @@
#ifndef IGT_LIVE_TEST_H
#define IGT_LIVE_TEST_H
+#include "gt/intel_gt_defines.h" /* for I915_MAX_GT */
#include "gt/intel_engine.h" /* for I915_NUM_ENGINES */
struct drm_i915_private;
@@ -17,7 +18,7 @@ struct igt_live_test {
const char *name;
unsigned int reset_global;
- unsigned int reset_engine[I915_NUM_ENGINES];
+ unsigned int reset_engine[I915_MAX_GT][I915_NUM_ENGINES];
};
/*
diff --git a/drivers/gpu/drm/imagination/pvr_cccb.h b/drivers/gpu/drm/imagination/pvr_cccb.h
index f35b3d4c9575..943fe8f2c963 100644
--- a/drivers/gpu/drm/imagination/pvr_cccb.h
+++ b/drivers/gpu/drm/imagination/pvr_cccb.h
@@ -86,6 +86,7 @@ pvr_cccb_get_size_of_cmd_with_hdr(u32 cmd_size)
/**
* pvr_cccb_cmdseq_can_fit() - Check if a command sequence can fit in the CCCB.
+ * @pvr_cccb: Target Client CCB.
* @size: Command sequence size.
*
* Returns:
diff --git a/drivers/gpu/drm/imagination/pvr_device.c b/drivers/gpu/drm/imagination/pvr_device.c
index 8499becf4fbb..1704c0268589 100644
--- a/drivers/gpu/drm/imagination/pvr_device.c
+++ b/drivers/gpu/drm/imagination/pvr_device.c
@@ -105,12 +105,12 @@ static int pvr_device_clk_init(struct pvr_device *pvr_dev)
sys_clk = devm_clk_get_optional(drm_dev->dev, "sys");
if (IS_ERR(sys_clk))
- return dev_err_probe(drm_dev->dev, PTR_ERR(core_clk),
+ return dev_err_probe(drm_dev->dev, PTR_ERR(sys_clk),
"failed to get sys clock\n");
mem_clk = devm_clk_get_optional(drm_dev->dev, "mem");
if (IS_ERR(mem_clk))
- return dev_err_probe(drm_dev->dev, PTR_ERR(core_clk),
+ return dev_err_probe(drm_dev->dev, PTR_ERR(mem_clk),
"failed to get mem clock\n");
pvr_dev->core_clk = core_clk;
@@ -127,7 +127,7 @@ static int pvr_device_clk_init(struct pvr_device *pvr_dev)
* This is called any time we receive a FW event. It iterates over all
* active queues and calls pvr_queue_process() on them.
*/
-void pvr_device_process_active_queues(struct pvr_device *pvr_dev)
+static void pvr_device_process_active_queues(struct pvr_device *pvr_dev)
{
struct pvr_queue *queue, *tmp_queue;
LIST_HEAD(active_queues);
@@ -286,8 +286,8 @@ pvr_request_firmware(struct pvr_device *pvr_dev)
filename = pvr_build_firmware_filename(pvr_dev, "powervr/rogue",
PVR_FW_VERSION_MAJOR);
- if (IS_ERR(filename))
- return PTR_ERR(filename);
+ if (!filename)
+ return -ENOMEM;
/*
* This function takes a copy of &filename, meaning we can free our
diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h
index e07655fc65e8..2ca7e535799f 100644
--- a/drivers/gpu/drm/imagination/pvr_device.h
+++ b/drivers/gpu/drm/imagination/pvr_device.h
@@ -203,17 +203,29 @@ struct pvr_device {
struct mutex lock;
} queues;
+ /**
+ * @watchdog: Watchdog for communications with firmware.
+ */
struct {
/** @work: Work item for watchdog callback. */
struct delayed_work work;
- /** @old_kccb_cmds_executed: KCCB command execution count at last watchdog poll. */
+ /**
+ * @old_kccb_cmds_executed: KCCB command execution count at last
+ * watchdog poll.
+ */
u32 old_kccb_cmds_executed;
- /** @kccb_stall_count: Number of watchdog polls KCCB has been stalled for. */
+ /**
+ * @kccb_stall_count: Number of watchdog polls KCCB has been
+ * stalled for.
+ */
u32 kccb_stall_count;
} watchdog;
+ /**
+ * @kccb: Circular buffer for communications with firmware.
+ */
struct {
/** @ccb: Kernel CCB. */
struct pvr_ccb ccb;
@@ -225,8 +237,8 @@ struct pvr_device {
struct pvr_fw_object *rtn_obj;
/**
- * @rtn: Pointer to CPU mapping of KCCB return slots. Must be accessed by
- * READ_ONCE()/WRITE_ONCE().
+ * @rtn: Pointer to CPU mapping of KCCB return slots. Must be
+ * accessed by READ_ONCE()/WRITE_ONCE().
*/
u32 *rtn;
@@ -293,14 +305,13 @@ struct pvr_file {
/**
* @pvr_dev: A reference to the powervr-specific wrapper for the
- * associated device. Saves on repeated calls to
- * to_pvr_device().
+ * associated device. Saves on repeated calls to to_pvr_device().
*/
struct pvr_device *pvr_dev;
/**
* @ctx_handles: Array of contexts belonging to this file. Array members
- * are of type "struct pvr_context *".
+ * are of type "struct pvr_context *".
*
* This array is used to allocate handles returned to userspace.
*/
diff --git a/drivers/gpu/drm/imagination/pvr_device_info.c b/drivers/gpu/drm/imagination/pvr_device_info.c
index 11e6bef52ecd..d3301cde7d11 100644
--- a/drivers/gpu/drm/imagination/pvr_device_info.c
+++ b/drivers/gpu/drm/imagination/pvr_device_info.c
@@ -227,7 +227,8 @@ int pvr_device_info_set_features(struct pvr_device *pvr_dev, const u64 *features
/* Verify no unsupported values in the bitmask. */
if (features_size > mapping_max_size) {
drm_warn(from_pvr_device(pvr_dev), "Unsupported features in firmware image");
- } else if (features_size == mapping_max_size && (mapping_max & 63)) {
+ } else if (features_size == mapping_max_size &&
+ ((mapping_max & 63) != 0)) {
u64 invalid_mask = ~0ull << (mapping_max & 63);
if (features[features_size - 1] & invalid_mask)
diff --git a/drivers/gpu/drm/imagination/pvr_free_list.c b/drivers/gpu/drm/imagination/pvr_free_list.c
index c61fd417edcb..5e51bc980751 100644
--- a/drivers/gpu/drm/imagination/pvr_free_list.c
+++ b/drivers/gpu/drm/imagination/pvr_free_list.c
@@ -255,7 +255,7 @@ pvr_free_list_insert_pages_locked(struct pvr_free_list *free_list,
if (!num_pages)
break;
- };
+ }
/* clang-format on */
/* Make sure our free_list update is flushed. */
diff --git a/drivers/gpu/drm/imagination/pvr_fw.h b/drivers/gpu/drm/imagination/pvr_fw.h
index 5cd3ef08d82b..b7966bd574a9 100644
--- a/drivers/gpu/drm/imagination/pvr_fw.h
+++ b/drivers/gpu/drm/imagination/pvr_fw.h
@@ -481,7 +481,8 @@ pvr_fw_object_unmap_and_destroy(struct pvr_fw_object *fw_obj)
}
/**
- * pvr_fw_get_dma_addr() - Get DMA address for given offset in firmware object
+ * pvr_fw_object_get_dma_addr() - Get DMA address for given offset in firmware
+ * object.
* @fw_obj: Pointer to object to lookup address in.
* @offset: Offset within object to lookup address at.
* @dma_addr_out: Pointer to location to store DMA address.
diff --git a/drivers/gpu/drm/imagination/pvr_fw_info.h b/drivers/gpu/drm/imagination/pvr_fw_info.h
index ad5d44a3067a..c3639440610e 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_info.h
+++ b/drivers/gpu/drm/imagination/pvr_fw_info.h
@@ -122,13 +122,13 @@ struct pvr_fw_layout_entry {
* struct pvr_fw_device_info_header - Device information header.
*/
struct pvr_fw_device_info_header {
- /* BRN Mask size (in u64s). */
+ /** @brn_mask_size: BRN mask size (in u64s). */
u64 brn_mask_size;
- /* ERN Mask size (in u64s). */
+ /** @ern_mask_size: ERN mask size (in u64s). */
u64 ern_mask_size;
- /* Feature Mask size (in u64s). */
+ /** @feature_mask_size: Feature mask size (in u64s). */
u64 feature_mask_size;
- /* Feature Parameter size (in u64s). */
+ /** @feature_param_size: Feature parameter size (in u64s). */
u64 feature_param_size;
};
diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.c b/drivers/gpu/drm/imagination/pvr_fw_meta.c
index 119934c36184..c39beb70c317 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_meta.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_meta.c
@@ -4,6 +4,7 @@
#include "pvr_device.h"
#include "pvr_fw.h"
#include "pvr_fw_info.h"
+#include "pvr_fw_meta.h"
#include "pvr_gem.h"
#include "pvr_rogue_cr_defs.h"
#include "pvr_rogue_meta.h"
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c
index 87a42fb6ace6..7159fc479001 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_trace.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c
@@ -121,6 +121,8 @@ void pvr_fw_trace_fini(struct pvr_device *pvr_dev)
pvr_fw_object_unmap_and_destroy(fw_trace->tracebuf_ctrl_obj);
}
+#if defined(CONFIG_DEBUG_FS)
+
/**
* update_logtype() - Send KCCB command to trigger FW to update logtype
* @pvr_dev: Target PowerVR device
@@ -165,52 +167,6 @@ err_up_read:
return err;
}
-#if defined(CONFIG_DEBUG_FS)
-
-static int fw_trace_group_mask_show(struct seq_file *m, void *data)
-{
- struct pvr_device *pvr_dev = m->private;
-
- seq_printf(m, "%08x\n", pvr_dev->fw_dev.fw_trace.group_mask);
-
- return 0;
-}
-
-static int fw_trace_group_mask_open(struct inode *inode, struct file *file)
-{
- return single_open(file, fw_trace_group_mask_show, inode->i_private);
-}
-
-static ssize_t fw_trace_group_mask_write(struct file *file, const char __user *ubuf, size_t len,
- loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct pvr_device *pvr_dev = m->private;
- u32 new_group_mask;
- int err;
-
- err = kstrtouint_from_user(ubuf, len, 0, &new_group_mask);
- if (err)
- return err;
-
- err = update_logtype(pvr_dev, new_group_mask);
- if (err)
- return err;
-
- pvr_dev->fw_dev.fw_trace.group_mask = new_group_mask;
-
- return (ssize_t)len;
-}
-
-static const struct file_operations pvr_fw_trace_group_mask_fops = {
- .owner = THIS_MODULE,
- .open = fw_trace_group_mask_open,
- .read = seq_read,
- .write = fw_trace_group_mask_write,
- .llseek = default_llseek,
- .release = single_release,
-};
-
struct pvr_fw_trace_seq_data {
/** @buffer: Pointer to copy of trace data. */
u32 *buffer;
diff --git a/drivers/gpu/drm/imagination/pvr_hwrt.h b/drivers/gpu/drm/imagination/pvr_hwrt.h
index 76992948d047..676070b20c3b 100644
--- a/drivers/gpu/drm/imagination/pvr_hwrt.h
+++ b/drivers/gpu/drm/imagination/pvr_hwrt.h
@@ -64,6 +64,7 @@ struct pvr_hwrt_dataset {
/** @common_fw_obj: FW object representing common FW-side structure. */
struct pvr_fw_object *common_fw_obj;
+ /** @common: Common HWRT data. */
struct rogue_fwif_hwrtdata_common common;
/** @data: HWRT data structures belonging to this set. */
diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c
index 9d0812710295..04139da6c04d 100644
--- a/drivers/gpu/drm/imagination/pvr_job.c
+++ b/drivers/gpu/drm/imagination/pvr_job.c
@@ -378,7 +378,7 @@ prepare_job_syncs(struct pvr_file *pvr_file,
/**
* prepare_job_syncs_for_each() - Prepare all sync objects for an array of jobs.
- * @file: PowerVR file.
+ * @pvr_file: PowerVR file.
* @job_data: Array of precreated jobs and their sync_ops.
* @job_count: Number of jobs.
* @signal_array: xarray to receive signal sync objects.
@@ -696,8 +696,6 @@ pvr_jobs_link_geom_frag(struct pvr_job_data *job_data, u32 *job_count)
* @pvr_dev: Target PowerVR device.
* @pvr_file: Pointer to PowerVR file structure.
* @args: Ioctl args.
- * @job_count: Number of jobs in @jobs_args. On error this will be updated
- * with the index into @jobs_args where the error occurred.
*
* This initial implementation is entirely synchronous; on return the GPU will
* be idle. This will not be the case for future implementations.
diff --git a/drivers/gpu/drm/imagination/pvr_mmu.c b/drivers/gpu/drm/imagination/pvr_mmu.c
index b71d30e5f380..c8562bfc0dcd 100644
--- a/drivers/gpu/drm/imagination/pvr_mmu.c
+++ b/drivers/gpu/drm/imagination/pvr_mmu.c
@@ -335,8 +335,9 @@ pvr_mmu_backing_page_fini(struct pvr_mmu_backing_page *page)
/**
* pvr_mmu_backing_page_sync() - Flush a MMU backing page from the CPU to the
- * device.
+ * device.
* @page: Target backing page.
+ * @flags: MMU flush flags. Must be one of %PVR_MMU_SYNC_LEVEL_*_FLAGS.
*
* .. caution::
*
diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
index d65c3fbedf5a..5ed9c98fb599 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.c
+++ b/drivers/gpu/drm/imagination/pvr_queue.c
@@ -1292,7 +1292,7 @@ struct pvr_queue *pvr_queue_create(struct pvr_context *ctx,
goto err_release_ufo;
err = drm_sched_entity_init(&queue->entity,
- DRM_SCHED_PRIORITY_MIN,
+ DRM_SCHED_PRIORITY_KERNEL,
&sched, 1, &ctx->faulty);
if (err)
goto err_sched_fini;
diff --git a/drivers/gpu/drm/imagination/pvr_queue.h b/drivers/gpu/drm/imagination/pvr_queue.h
index b5ce2c742150..e06ced69302f 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.h
+++ b/drivers/gpu/drm/imagination/pvr_queue.h
@@ -50,7 +50,7 @@ struct pvr_queue_cccb_fence_ctx {
*/
struct pvr_job *job;
- /** @lock: Lock protecting access to the job object. */
+ /** @job_lock: Lock protecting access to the job object. */
struct mutex job_lock;
};
@@ -114,7 +114,7 @@ struct pvr_queue {
} timeline_ufo;
/**
- * last_queued_job_scheduled_fence: The scheduled fence of the last
+ * @last_queued_job_scheduled_fence: The scheduled fence of the last
* job queued to this queue.
*
* We use it to insert frag -> geom dependencies when issuing combined
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h
index 571954182f33..56e11009e123 100644
--- a/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h
@@ -497,7 +497,7 @@ static const struct rogue_km_stid_fmt stid_fmts[] = {
{ ROGUE_FW_LOG_CREATESFID(213, ROGUE_FW_GROUP_MAIN, 1),
"Safety Watchdog threshold period set to 0x%x clock cycles" },
{ ROGUE_FW_LOG_CREATESFID(214, ROGUE_FW_GROUP_MAIN, 0),
- "MTS Safety Event trigged by the safety watchdog." },
+ "MTS Safety Event triggered by the safety watchdog." },
{ ROGUE_FW_LOG_CREATESFID(215, ROGUE_FW_GROUP_MAIN, 3),
"DM%d USC tasks range limit 0 - %d, stride %d" },
{ ROGUE_FW_LOG_CREATESFID(216, ROGUE_FW_GROUP_MAIN, 1),
@@ -1114,7 +1114,7 @@ static const struct rogue_km_stid_fmt stid_fmts[] = {
{ ROGUE_FW_LOG_CREATESFID(39, ROGUE_FW_GROUP_SPM, 2),
"3DMemFree matches freelist 0x%08x (FL type = %u)" },
{ ROGUE_FW_LOG_CREATESFID(40, ROGUE_FW_GROUP_SPM, 0),
- "Raise the 3DMemFreeDedected flag" },
+ "Raise the 3DMemFreeDetected flag" },
{ ROGUE_FW_LOG_CREATESFID(41, ROGUE_FW_GROUP_SPM, 1),
"Wait for pending grow on Freelist 0x%08x" },
{ ROGUE_FW_LOG_CREATESFID(42, ROGUE_FW_GROUP_SPM, 1),
diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
index 3ad1366294b9..f42345fbe4bf 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.c
+++ b/drivers/gpu/drm/imagination/pvr_vm.c
@@ -42,7 +42,7 @@ struct pvr_vm_context {
/** @mmu_ctx: The context for binding to physical memory. */
struct pvr_mmu_context *mmu_ctx;
- /** @gpuva_mgr: GPUVA manager object associated with this context. */
+ /** @gpuvm_mgr: GPUVM object associated with this context. */
struct drm_gpuvm gpuvm_mgr;
/** @lock: Global lock on this VM. */
@@ -64,6 +64,12 @@ struct pvr_vm_context {
struct drm_gem_object dummy_gem;
};
+static inline
+struct pvr_vm_context *to_pvr_vm_context(struct drm_gpuvm *gpuvm)
+{
+ return container_of(gpuvm, struct pvr_vm_context, gpuvm_mgr);
+}
+
struct pvr_vm_context *pvr_vm_context_get(struct pvr_vm_context *vm_ctx)
{
if (vm_ctx)
@@ -108,12 +114,6 @@ struct pvr_vm_gpuva {
struct drm_gpuva base;
};
-static __always_inline
-struct pvr_vm_gpuva *to_pvr_vm_gpuva(struct drm_gpuva *gpuva)
-{
- return container_of(gpuva, struct pvr_vm_gpuva, base);
-}
-
enum pvr_vm_bind_type {
PVR_VM_BIND_TYPE_MAP,
PVR_VM_BIND_TYPE_UNMAP,
@@ -224,6 +224,7 @@ pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op,
struct pvr_gem_object *pvr_obj, u64 offset,
u64 device_addr, u64 size)
{
+ struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);
const bool is_user = vm_ctx == vm_ctx->pvr_dev->kernel_vm_ctx;
const u64 pvr_obj_size = pvr_gem_object_size(pvr_obj);
struct sg_table *sgt;
@@ -238,17 +239,18 @@ pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op,
return -EINVAL;
}
- if (!pvr_device_addr_and_size_are_valid(device_addr, size) ||
+ if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size) ||
offset & ~PAGE_MASK || size & ~PAGE_MASK ||
offset >= pvr_obj_size || offset_plus_size > pvr_obj_size)
return -EINVAL;
bind_op->type = PVR_VM_BIND_TYPE_MAP;
- bind_op->gpuvm_bo = drm_gpuvm_bo_create(&vm_ctx->gpuvm_mgr,
- gem_from_pvr_gem(pvr_obj));
- if (!bind_op->gpuvm_bo)
- return -ENOMEM;
+ dma_resv_lock(obj->resv, NULL);
+ bind_op->gpuvm_bo = drm_gpuvm_bo_obtain(&vm_ctx->gpuvm_mgr, obj);
+ dma_resv_unlock(obj->resv);
+ if (IS_ERR(bind_op->gpuvm_bo))
+ return PTR_ERR(bind_op->gpuvm_bo);
bind_op->new_va = kzalloc(sizeof(*bind_op->new_va), GFP_KERNEL);
bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
@@ -293,7 +295,7 @@ pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
{
int err;
- if (!pvr_device_addr_and_size_are_valid(device_addr, size))
+ if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size))
return -EINVAL;
bind_op->type = PVR_VM_BIND_TYPE_UNMAP;
@@ -325,48 +327,6 @@ err_bind_op_fini:
return err;
}
-static int
-pvr_vm_bind_op_lock_resvs(struct drm_exec *exec, struct pvr_vm_bind_op *bind_op)
-{
- drm_exec_until_all_locked(exec) {
- struct drm_gem_object *r_obj = &bind_op->vm_ctx->dummy_gem;
- struct drm_gpuvm *gpuvm = &bind_op->vm_ctx->gpuvm_mgr;
- struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;
- struct drm_gpuvm_bo *gpuvm_bo;
-
- /* Acquire lock on the vm_context's reserve object. */
- int err = drm_exec_lock_obj(exec, r_obj);
-
- drm_exec_retry_on_contention(exec);
- if (err)
- return err;
-
- /* Acquire lock on all BOs in the context. */
- list_for_each_entry(gpuvm_bo, &gpuvm->extobj.list,
- list.entry.extobj) {
- err = drm_exec_lock_obj(exec, gpuvm_bo->obj);
-
- drm_exec_retry_on_contention(exec);
- if (err)
- return err;
- }
-
- /* Unmap operations don't have an object to lock. */
- if (!pvr_obj)
- break;
-
- /* Acquire lock on the GEM being mapped. */
- err = drm_exec_lock_obj(exec,
- gem_from_pvr_gem(bind_op->pvr_obj));
-
- drm_exec_retry_on_contention(exec);
- if (err)
- return err;
- }
-
- return 0;
-}
-
/**
* pvr_vm_gpuva_map() - Insert a mapping into a memory context.
* @op: gpuva op containing the remap details.
@@ -503,6 +463,7 @@ pvr_device_addr_is_valid(u64 device_addr)
/**
* pvr_device_addr_and_size_are_valid() - Tests whether a device-virtual
* address and associated size are both valid.
+ * @vm_ctx: Target VM context.
* @device_addr: Virtual device address to test.
* @size: Size of the range based at @device_addr to test.
*
@@ -521,16 +482,18 @@ pvr_device_addr_is_valid(u64 device_addr)
* * %false otherwise.
*/
bool
-pvr_device_addr_and_size_are_valid(u64 device_addr, u64 size)
+pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx,
+ u64 device_addr, u64 size)
{
return pvr_device_addr_is_valid(device_addr) &&
+ drm_gpuvm_range_valid(&vm_ctx->gpuvm_mgr, device_addr, size) &&
size != 0 && (size & ~PVR_DEVICE_PAGE_MASK) == 0 &&
(device_addr + size <= PVR_PAGE_TABLE_ADDR_SPACE_SIZE);
}
-void pvr_gpuvm_free(struct drm_gpuvm *gpuvm)
+static void pvr_gpuvm_free(struct drm_gpuvm *gpuvm)
{
-
+ kfree(to_pvr_vm_context(gpuvm));
}
static const struct drm_gpuvm_ops pvr_vm_gpuva_ops = {
@@ -650,12 +613,11 @@ pvr_vm_context_release(struct kref *ref_count)
WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
vm_ctx->gpuvm_mgr.mm_range));
- drm_gpuvm_put(&vm_ctx->gpuvm_mgr);
pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
drm_gem_private_object_fini(&vm_ctx->dummy_gem);
mutex_destroy(&vm_ctx->lock);
- kfree(vm_ctx);
+ drm_gpuvm_put(&vm_ctx->gpuvm_mgr);
}
/**
@@ -721,6 +683,20 @@ void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file)
}
}
+static int
+pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec)
+{
+ struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv;
+ struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;
+
+ /* Unmap operations don't have an object to lock. */
+ if (!pvr_obj)
+ return 0;
+
+ /* Acquire lock on the GEM being mapped. */
+ return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj));
+}
+
/**
* pvr_vm_map() - Map a section of physical memory into a section of
* device-virtual memory.
@@ -748,7 +724,15 @@ pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
u64 pvr_obj_offset, u64 device_addr, u64 size)
{
struct pvr_vm_bind_op bind_op = {0};
- struct drm_exec exec;
+ struct drm_gpuvm_exec vm_exec = {
+ .vm = &vm_ctx->gpuvm_mgr,
+ .flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES,
+ .extra = {
+ .fn = pvr_vm_lock_extra,
+ .priv = &bind_op,
+ },
+ };
int err = pvr_vm_bind_op_map_init(&bind_op, vm_ctx, pvr_obj,
pvr_obj_offset, device_addr,
@@ -757,18 +741,15 @@ pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
if (err)
return err;
- drm_exec_init(&exec,
- DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES);
-
pvr_gem_object_get(pvr_obj);
- err = pvr_vm_bind_op_lock_resvs(&exec, &bind_op);
+ err = drm_gpuvm_exec_lock(&vm_exec);
if (err)
goto err_cleanup;
err = pvr_vm_bind_op_exec(&bind_op);
- drm_exec_fini(&exec);
+ drm_gpuvm_exec_unlock(&vm_exec);
err_cleanup:
pvr_vm_bind_op_fini(&bind_op);
@@ -794,24 +775,28 @@ int
pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
{
struct pvr_vm_bind_op bind_op = {0};
- struct drm_exec exec;
+ struct drm_gpuvm_exec vm_exec = {
+ .vm = &vm_ctx->gpuvm_mgr,
+ .flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES,
+ .extra = {
+ .fn = pvr_vm_lock_extra,
+ .priv = &bind_op,
+ },
+ };
int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr,
size);
-
if (err)
return err;
- drm_exec_init(&exec,
- DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES);
-
- err = pvr_vm_bind_op_lock_resvs(&exec, &bind_op);
+ err = drm_gpuvm_exec_lock(&vm_exec);
if (err)
goto err_cleanup;
err = pvr_vm_bind_op_exec(&bind_op);
- drm_exec_fini(&exec);
+ drm_gpuvm_exec_unlock(&vm_exec);
err_cleanup:
pvr_vm_bind_op_fini(&bind_op);
diff --git a/drivers/gpu/drm/imagination/pvr_vm.h b/drivers/gpu/drm/imagination/pvr_vm.h
index cf8b97553dc8..f2a6463f2b05 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.h
+++ b/drivers/gpu/drm/imagination/pvr_vm.h
@@ -29,7 +29,8 @@ struct drm_exec;
/* Functions defined in pvr_vm.c */
bool pvr_device_addr_is_valid(u64 device_addr);
-bool pvr_device_addr_and_size_are_valid(u64 device_addr, u64 size);
+bool pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx,
+ u64 device_addr, u64 size);
struct pvr_vm_context *pvr_vm_create_context(struct pvr_device *pvr_dev,
bool is_userspace_context);
diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.c b/drivers/gpu/drm/imagination/pvr_vm_mips.c
index 7268cf6e630b..2bc7181a4c3e 100644
--- a/drivers/gpu/drm/imagination/pvr_vm_mips.c
+++ b/drivers/gpu/drm/imagination/pvr_vm_mips.c
@@ -57,6 +57,7 @@ pvr_vm_mips_init(struct pvr_device *pvr_dev)
PAGE_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, mips_data->pt_dma_addr[page_nr])) {
err = -ENOMEM;
+ __free_page(mips_data->pt_pages[page_nr]);
goto err_free_pages;
}
}
@@ -79,13 +80,11 @@ pvr_vm_mips_init(struct pvr_device *pvr_dev)
return 0;
err_free_pages:
- for (; page_nr >= 0; page_nr--) {
- if (mips_data->pt_dma_addr[page_nr])
- dma_unmap_page(from_pvr_device(pvr_dev)->dev,
- mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE);
+ while (--page_nr >= 0) {
+ dma_unmap_page(from_pvr_device(pvr_dev)->dev,
+ mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE);
- if (mips_data->pt_pages[page_nr])
- __free_page(mips_data->pt_pages[page_nr]);
+ __free_page(mips_data->pt_pages[page_nr]);
}
return err;
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
index 989eca32d325..53840ab054c7 100644
--- a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
@@ -12,8 +12,10 @@
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/videodev2.h>
@@ -617,7 +619,6 @@ static int imx_ldb_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- const struct of_device_id *of_id = of_match_device(imx_ldb_dt_ids, dev);
struct device_node *child;
struct imx_ldb *imx_ldb;
int dual;
@@ -638,9 +639,7 @@ static int imx_ldb_probe(struct platform_device *pdev)
regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0);
imx_ldb->dev = dev;
-
- if (of_id)
- imx_ldb->lvds_mux = of_id->data;
+ imx_ldb->lvds_mux = device_get_match_data(dev);
dual = of_property_read_bool(np, "fsl,dual-channel");
if (dual)
diff --git a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
index 0902983374d0..43ddf3a9810b 100644
--- a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
+++ b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
@@ -342,21 +342,12 @@ static const struct drm_mode_config_helper_funcs imx_lcdc_mode_config_helpers =
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
-static void imx_lcdc_release(struct drm_device *drm)
-{
- struct imx_lcdc *lcdc = imx_lcdc_from_drmdev(drm);
-
- drm_kms_helper_poll_fini(drm);
- kfree(lcdc);
-}
-
DEFINE_DRM_GEM_DMA_FOPS(imx_lcdc_drm_fops);
static struct drm_driver imx_lcdc_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &imx_lcdc_drm_fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
- .release = imx_lcdc_release,
.name = "imx-lcdc",
.desc = "i.MX LCDC driver",
.date = "20200716",
diff --git a/drivers/gpu/drm/loongson/Kconfig b/drivers/gpu/drm/loongson/Kconfig
index df6946d505fa..8e59753e532d 100644
--- a/drivers/gpu/drm/loongson/Kconfig
+++ b/drivers/gpu/drm/loongson/Kconfig
@@ -3,6 +3,7 @@
config DRM_LOONGSON
tristate "DRM support for Loongson Graphics"
depends on DRM && PCI && MMU
+ depends on LOONGARCH || MIPS || COMPILE_TEST
select DRM_KMS_HELPER
select DRM_TTM
select I2C
diff --git a/drivers/gpu/drm/loongson/lsdc_plane.c b/drivers/gpu/drm/loongson/lsdc_plane.c
index 0d5094633222..d227a2c1dcf1 100644
--- a/drivers/gpu/drm/loongson/lsdc_plane.c
+++ b/drivers/gpu/drm/loongson/lsdc_plane.c
@@ -9,7 +9,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include "lsdc_drv.h"
#include "lsdc_regs.h"
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 4252e3839fbc..2bfcb222e353 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -347,7 +347,7 @@ struct msm_gpu_perfcntr {
* DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some
* cases, so we don't use it (no need for kernel generated jobs).
*/
-#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)
+#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_LOW - DRM_SCHED_PRIORITY_HIGH)
/**
* struct msm_file_private - per-drm_file context
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 625c1bfc4173..b483ef48216a 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -11,9 +11,10 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/pm_runtime.h>
#include <drm/drm_atomic_helper.h>
@@ -346,18 +347,13 @@ MODULE_DEVICE_TABLE(of, mxsfb_dt_ids);
static int mxsfb_probe(struct platform_device *pdev)
{
struct drm_device *drm;
- const struct of_device_id *of_id =
- of_match_device(mxsfb_dt_ids, &pdev->dev);
int ret;
- if (!pdev->dev.of_node)
- return -ENODEV;
-
drm = drm_dev_alloc(&mxsfb_driver, &pdev->dev);
if (IS_ERR(drm))
return PTR_ERR(drm);
- ret = mxsfb_load(drm, of_id->data);
+ ret = mxsfb_load(drm, device_get_match_data(&pdev->dev));
if (ret)
goto err_free;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 11fe75b68e95..8d37a694b772 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -2476,7 +2476,7 @@ nv50_disp_atomic_commit(struct drm_device *dev,
err_cleanup:
if (ret)
- drm_atomic_helper_cleanup_planes(dev, state);
+ drm_atomic_helper_unprepare_planes(dev, state);
done:
pm_runtime_put_autosuspend(dev->dev);
return ret;
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h
index 5a2f273d95c8..0e32e71e123f 100644
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h
@@ -26,6 +26,49 @@
* DEALINGS IN THE SOFTWARE.
*/
+/**
+ * msgqTxHeader -- TX queue data structure
+ * @version: the version of this structure, must be 0
+ * @size: the size of the entire queue, including this header
+ * @msgSize: the padded size of queue element, 16 is minimum
+ * @msgCount: the number of elements in this queue
+ * @writePtr: head index of this queue
+ * @flags: 1 = swap the RX pointers
+ * @rxHdrOff: offset of readPtr in this structure
+ * @entryOff: offset of beginning of queue (msgqRxHeader), relative to
+ * beginning of this structure
+ *
+ * The command queue is a queue of RPCs that are sent from the driver to the
+ * GSP. The status queue is a queue of messages/responses from GSP-RM to the
+ * driver. Although the driver allocates memory for both queues, the command
+ * queue is owned by the driver and the status queue is owned by GSP-RM. In
+ * addition, the headers of the two queues must not share the same 4K page.
+ *
+ * Each queue is prefixed with this data structure. The idea is that a queue
+ * and its header are written to only by their owner. That is, only the
+ * driver writes to the command queue and command queue header, and only the
+ * GSP writes to the status (receive) queue and its header.
+ *
+ * This is enforced by the concept of "swapping" the RX pointers. This is
+ * why the 'flags' field must be set to 1. 'rxHdrOff' is how the GSP knows
+ * where the tail pointer of its status queue is.
+ *
+ * When the driver writes a new RPC to the command queue, it updates writePtr.
+ * When it reads a new message from the status queue, it updates readPtr. In
+ * this way, the GSP knows when a new command is in the queue (it polls
+ * writePtr) and it knows how much free space is in the status queue (it
+ * checks readPtr). The driver never cares about how much free space is in
+ * the status queue.
+ *
+ * As usual, producers write to the head pointer, and consumers read from the
+ * tail pointer. When head == tail, the queue is empty.
+ *
+ * So to summarize:
+ * command.writePtr = head of command queue
+ * command.readPtr = tail of status queue
+ * status.writePtr = head of status queue
+ * status.readPtr = tail of command queue
+ */
typedef struct
{
NvU32 version; // queue version
@@ -38,6 +81,14 @@ typedef struct
NvU32 entryOff; // Offset of entries from start of backing store.
} msgqTxHeader;
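+
+/*
+ * Illustrative sketch, not part of this patch: with the swapped pointers
+ * described above, a producer can compute the free space in its queue from
+ * its own writePtr and the readPtr published by the consumer (located via
+ * rxHdrOff). Since head == tail means "empty", one slot is kept unused to
+ * distinguish a full queue:
+ *
+ *	free = (readPtr + msgCount - writePtr - 1) % msgCount;
+ */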
+/**
+ * msgqRxHeader - RX queue data structure
+ * @readPtr: tail index of the other queue
+ *
+ * Although this is a separate struct, it could easily be merged into
+ * msgqTxHeader. msgqTxHeader.rxHdrOff is simply the offset of readPtr
+ * from the beginning of msgqTxHeader.
+ */
typedef struct
{
NvU32 readPtr; // message id of last message read
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h
index 754c6af42f30..10121218f4d3 100644
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h
@@ -38,7 +38,7 @@ typedef struct PACKED_REGISTRY_TABLE
{
NvU32 size;
NvU32 numEntries;
- PACKED_REGISTRY_ENTRY entries[0];
+ PACKED_REGISTRY_ENTRY entries[] __counted_by(numEntries);
} PACKED_REGISTRY_TABLE;
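+
+/*
+ * Illustrative sketch, not part of this patch: with __counted_by(numEntries),
+ * the counter must be assigned before entries[] is indexed so that
+ * FORTIFY_SOURCE/UBSAN can bounds-check accesses. Allocations are typically
+ * sized with struct_size():
+ *
+ *	PACKED_REGISTRY_TABLE *tbl;
+ *
+ *	tbl = kzalloc(struct_size(tbl, entries, n), GFP_KERNEL);
+ *	if (tbl)
+ *		tbl->numEntries = n;	// set before touching entries[i]
+ */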
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 2edd7bb13fae..a04156ca8390 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -127,21 +127,14 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
{
struct nouveau_abi16_ntfy *ntfy, *temp;
- /* When a client exits without waiting for it's queued up jobs to
- * finish it might happen that we fault the channel. This is due to
- * drm_file_free() calling drm_gem_release() before the postclose()
- * callback. Hence, we can't tear down this scheduler entity before
- * uvmm mappings are unmapped. Currently, we can't detect this case.
- *
- * However, this should be rare and harmless, since the channel isn't
- * needed anymore.
- */
- nouveau_sched_entity_fini(&chan->sched_entity);
+ /* Cancel all jobs from the entity's queue. */
+ drm_sched_entity_fini(&chan->sched.entity);
- /* wait for all activity to stop before cleaning up */
if (chan->chan)
nouveau_channel_idle(chan->chan);
+ nouveau_sched_fini(&chan->sched);
+
/* cleanup notifier state */
list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
nouveau_abi16_ntfy_fini(chan, ntfy);
@@ -344,8 +337,8 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (ret)
goto done;
- ret = nouveau_sched_entity_init(&chan->sched_entity, &drm->sched,
- drm->sched_wq);
+ ret = nouveau_sched_init(&chan->sched, drm, drm->sched_wq,
+ chan->chan->dma.ib_max);
if (ret)
goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 9f538486c10e..1f5e243c0c75 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -26,7 +26,7 @@ struct nouveau_abi16_chan {
struct nouveau_bo *ntfy;
struct nouveau_vma *ntfy_vma;
struct nvkm_mm heap;
- struct nouveau_sched_entity sched_entity;
+ struct nouveau_sched sched;
};
struct nouveau_abi16 {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7afad86da64b..00cc7d1abaa3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -325,8 +325,9 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
(!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
continue;
- if (pi < 0)
- pi = i;
+ /* Pick the last one as it will be the smallest. */
+ pi = i;
+
/* Stop once the buffer is larger than the current page size. */
if (*size >= 1ULL << vmm->page[i].shift)
break;
@@ -1061,17 +1062,18 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct drm_gem_object *obj = &bo->base;
struct ttm_resource *old_reg = bo->resource;
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
-
if (new_reg->mem_type == TTM_PL_TT) {
ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg);
if (ret)
return ret;
}
+ drm_gpuvm_bo_gem_evict(obj, evict);
nouveau_bo_move_ntfy(bo, new_reg);
ret = ttm_bo_wait_ctx(bo, ctx);
if (ret)
@@ -1136,6 +1138,7 @@ out:
out_ntfy:
if (ret) {
nouveau_bo_move_ntfy(bo, bo->resource);
+ drm_gpuvm_bo_gem_evict(obj, !evict);
}
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index f603eaef1560..6f6c31a9937b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -201,9 +201,9 @@ nouveau_cli_fini(struct nouveau_cli *cli)
WARN_ON(!list_empty(&cli->worker));
usif_client_fini(cli);
+ nouveau_sched_fini(&cli->sched);
if (uvmm)
nouveau_uvmm_fini(uvmm);
- nouveau_sched_entity_fini(&cli->sched_entity);
nouveau_vmm_fini(&cli->svm);
nouveau_vmm_fini(&cli->vmm);
nvif_mmu_dtor(&cli->mmu);
@@ -310,8 +310,17 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
cli->mem = &mems[ret];
- ret = nouveau_sched_entity_init(&cli->sched_entity, &drm->sched,
- drm->sched_wq);
+ /* Don't pass in the (shared) sched_wq in order to let
+ * nouveau_sched_init() create a dedicated one for VM_BIND jobs.
+ *
+ * This is required to ensure that for VM_BIND jobs the free_job() work and
+ * run_job() work can always run concurrently and hence, that free_job()
+ * work can never stall run_job() work. For EXEC jobs we don't have this
+ * requirement, since an EXEC job's free_job() does not need to take any
+ * locks which are directly or indirectly held for allocations elsewhere.
+ */
+ ret = nouveau_sched_init(&cli->sched, drm, NULL, 1);
if (ret)
goto done;
@@ -582,13 +591,16 @@ nouveau_drm_device_init(struct drm_device *dev)
nvif_parent_ctor(&nouveau_parent, &drm->parent);
drm->master.base.object.parent = &drm->parent;
- ret = nouveau_sched_init(drm);
- if (ret)
+ drm->sched_wq = alloc_workqueue("nouveau_sched_wq_shared", 0,
+ WQ_MAX_ACTIVE);
+ if (!drm->sched_wq) {
+ ret = -ENOMEM;
goto fail_alloc;
+ }
ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
if (ret)
- goto fail_sched;
+ goto fail_wq;
ret = nouveau_cli_init(drm, "DRM", &drm->client);
if (ret)
@@ -658,8 +670,8 @@ fail_ttm:
nouveau_cli_fini(&drm->client);
fail_master:
nouveau_cli_fini(&drm->master);
-fail_sched:
- nouveau_sched_fini(drm);
+fail_wq:
+ destroy_workqueue(drm->sched_wq);
fail_alloc:
nvif_parent_dtor(&drm->parent);
kfree(drm);
@@ -711,10 +723,9 @@ nouveau_drm_device_fini(struct drm_device *dev)
}
mutex_unlock(&drm->clients_lock);
- nouveau_sched_fini(drm);
-
nouveau_cli_fini(&drm->client);
nouveau_cli_fini(&drm->master);
+ destroy_workqueue(drm->sched_wq);
nvif_parent_dtor(&drm->parent);
mutex_destroy(&drm->clients_lock);
kfree(drm);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 7f7051df84a6..8a6d94c8b163 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -98,7 +98,7 @@ struct nouveau_cli {
bool disabled;
} uvmm;
- struct nouveau_sched_entity sched_entity;
+ struct nouveau_sched sched;
const struct nvif_mclass *mem;
@@ -258,6 +258,9 @@ struct nouveau_drm {
u64 context_base;
} *runl;
+ /* Workqueue used for channel schedulers. */
+ struct workqueue_struct *sched_wq;
+
/* context for accelerated drm-internal operations */
struct nouveau_channel *cechan;
struct nouveau_channel *channel;
@@ -298,10 +301,6 @@ struct nouveau_drm {
struct mutex lock;
bool component_registered;
} audio;
-
- struct drm_gpu_scheduler sched;
- struct workqueue_struct *sched_wq;
-
};
static inline struct nouveau_drm *
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
index 9a5ef574744b..bc5d71b79ab2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: MIT
-#include <drm/drm_exec.h>
-
#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
@@ -86,14 +84,12 @@
*/
static int
-nouveau_exec_job_submit(struct nouveau_job *job)
+nouveau_exec_job_submit(struct nouveau_job *job,
+ struct drm_gpuvm_exec *vme)
{
struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
struct nouveau_cli *cli = job->cli;
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
- struct drm_exec *exec = &job->exec;
- struct drm_gem_object *obj;
- unsigned long index;
int ret;
/* Create a new fence, but do not emit yet. */
@@ -102,52 +98,29 @@ nouveau_exec_job_submit(struct nouveau_job *job)
return ret;
nouveau_uvmm_lock(uvmm);
- drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
- DRM_EXEC_IGNORE_DUPLICATES);
- drm_exec_until_all_locked(exec) {
- struct drm_gpuva *va;
-
- drm_gpuvm_for_each_va(va, &uvmm->base) {
- if (unlikely(va == &uvmm->base.kernel_alloc_node))
- continue;
-
- ret = drm_exec_prepare_obj(exec, va->gem.obj, 1);
- drm_exec_retry_on_contention(exec);
- if (ret)
- goto err_uvmm_unlock;
- }
+ ret = drm_gpuvm_exec_lock(vme);
+ if (ret) {
+ nouveau_uvmm_unlock(uvmm);
+ return ret;
}
nouveau_uvmm_unlock(uvmm);
- drm_exec_for_each_locked_object(exec, index, obj) {
- struct nouveau_bo *nvbo = nouveau_gem_object(obj);
-
- ret = nouveau_bo_validate(nvbo, true, false);
- if (ret)
- goto err_exec_fini;
+ ret = drm_gpuvm_exec_validate(vme);
+ if (ret) {
+ drm_gpuvm_exec_unlock(vme);
+ return ret;
}
return 0;
-
-err_uvmm_unlock:
- nouveau_uvmm_unlock(uvmm);
-err_exec_fini:
- drm_exec_fini(exec);
- return ret;
-
}
static void
-nouveau_exec_job_armed_submit(struct nouveau_job *job)
+nouveau_exec_job_armed_submit(struct nouveau_job *job,
+ struct drm_gpuvm_exec *vme)
{
- struct drm_exec *exec = &job->exec;
- struct drm_gem_object *obj;
- unsigned long index;
-
- drm_exec_for_each_locked_object(exec, index, obj)
- dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage);
-
- drm_exec_fini(exec);
+ drm_gpuvm_exec_resv_add_fence(vme, job->done_fence,
+ job->resv_usage, job->resv_usage);
+ drm_gpuvm_exec_unlock(vme);
}
static struct dma_fence *
@@ -192,6 +165,7 @@ nouveau_exec_job_free(struct nouveau_job *job)
{
struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
+ nouveau_job_done(job);
nouveau_job_free(job);
kfree(exec_job->fence);
@@ -211,8 +185,6 @@ nouveau_exec_job_timeout(struct nouveau_job *job)
NV_PRINTK(warn, job->cli, "job timeout, channel %d killed!\n",
chan->chid);
- nouveau_sched_entity_fini(job->entity);
-
return DRM_GPU_SCHED_STAT_NOMINAL;
}
@@ -259,10 +231,12 @@ nouveau_exec_job_init(struct nouveau_exec_job **pjob,
}
}
+ args.file_priv = __args->file_priv;
job->chan = __args->chan;
- args.sched_entity = __args->sched_entity;
- args.file_priv = __args->file_priv;
+ args.sched = __args->sched;
+ /* Plus one to account for the HW fence. */
+ args.credits = job->push.count + 1;
args.in_sync.count = __args->in_sync.count;
args.in_sync.s = __args->in_sync.s;
@@ -415,7 +389,7 @@ nouveau_exec_ioctl_exec(struct drm_device *dev,
if (ret)
goto out;
- args.sched_entity = &chan16->sched_entity;
+ args.sched = &chan16->sched;
args.file_priv = file_priv;
args.chan = chan;
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.h b/drivers/gpu/drm/nouveau/nouveau_exec.h
index 5488d337bcc0..9b3b151facfd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.h
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.h
@@ -3,16 +3,12 @@
#ifndef __NOUVEAU_EXEC_H__
#define __NOUVEAU_EXEC_H__
-#include <drm/drm_exec.h>
-
#include "nouveau_drv.h"
#include "nouveau_sched.h"
struct nouveau_exec_job_args {
struct drm_file *file_priv;
- struct nouveau_sched_entity *sched_entity;
-
- struct drm_exec exec;
+ struct nouveau_sched *sched;
struct nouveau_channel *chan;
struct {
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index e03fd2bc8a11..dd98f6910f9c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -12,30 +12,28 @@
#include "nouveau_abi16.h"
#include "nouveau_sched.h"
-/* FIXME
- *
- * We want to make sure that jobs currently executing can't be deferred by
- * other jobs competing for the hardware. Otherwise we might end up with job
- * timeouts just because of too many clients submitting too many jobs. We don't
- * want jobs to time out because of system load, but because of the job being
- * too bulky.
- *
- * For now allow for up to 16 concurrent jobs in flight until we know how many
- * rings the hardware can process in parallel.
- */
-#define NOUVEAU_SCHED_HW_SUBMISSIONS 16
#define NOUVEAU_SCHED_JOB_TIMEOUT_MS 10000
+/* Starts at 0, since the DRM scheduler interprets these priorities as (initial)
+ * indices into the run-queue array.
+ */
+enum nouveau_sched_priority {
+ NOUVEAU_SCHED_PRIORITY_SINGLE = DRM_SCHED_PRIORITY_KERNEL,
+ NOUVEAU_SCHED_PRIORITY_COUNT,
+};
+
int
nouveau_job_init(struct nouveau_job *job,
struct nouveau_job_args *args)
{
- struct nouveau_sched_entity *entity = args->sched_entity;
+ struct nouveau_sched *sched = args->sched;
int ret;
+ INIT_LIST_HEAD(&job->entry);
+
job->file_priv = args->file_priv;
job->cli = nouveau_cli(args->file_priv);
- job->entity = entity;
+ job->sched = sched;
job->sync = args->sync;
job->resv_usage = args->resv_usage;
@@ -86,10 +84,10 @@ nouveau_job_init(struct nouveau_job *job,
ret = -ENOMEM;
goto err_free_objs;
}
-
}
- ret = drm_sched_job_init(&job->base, &entity->base, 1, NULL);
+ ret = drm_sched_job_init(&job->base, &sched->entity,
+ args->credits, NULL);
if (ret)
goto err_free_chains;
@@ -109,6 +107,27 @@ return ret;
}
void
+nouveau_job_fini(struct nouveau_job *job)
+{
+ dma_fence_put(job->done_fence);
+ drm_sched_job_cleanup(&job->base);
+
+ job->ops->free(job);
+}
+
+void
+nouveau_job_done(struct nouveau_job *job)
+{
+ struct nouveau_sched *sched = job->sched;
+
+ spin_lock(&sched->job.list.lock);
+ list_del(&job->entry);
+ spin_unlock(&sched->job.list.lock);
+
+ wake_up(&sched->job.wq);
+}
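+
+/*
+ * Illustrative pairing, not part of this patch: a job type's free() callback
+ * calls nouveau_job_done() so that the job drops off sched->job.list and
+ * nouveau_sched_fini() can drain the list (mirroring nouveau_exec_job_free()).
+ * The function name is hypothetical:
+ *
+ *	static void my_job_free(struct nouveau_job *job)
+ *	{
+ *		nouveau_job_done(job);
+ *		nouveau_job_free(job);
+ *	}
+ */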
+
+void
nouveau_job_free(struct nouveau_job *job)
{
kfree(job->in_sync.data);
@@ -117,13 +136,6 @@ nouveau_job_free(struct nouveau_job *job)
kfree(job->out_sync.chains);
}
-void nouveau_job_fini(struct nouveau_job *job)
-{
- dma_fence_put(job->done_fence);
- drm_sched_job_cleanup(&job->base);
- job->ops->free(job);
-}
-
static int
sync_find_fence(struct nouveau_job *job,
struct drm_nouveau_sync *sync,
@@ -261,8 +273,13 @@ nouveau_job_fence_attach(struct nouveau_job *job)
int
nouveau_job_submit(struct nouveau_job *job)
{
- struct nouveau_sched_entity *entity = to_nouveau_sched_entity(job->base.entity);
+ struct nouveau_sched *sched = job->sched;
struct dma_fence *done_fence = NULL;
+ struct drm_gpuvm_exec vm_exec = {
+ .vm = &nouveau_cli_uvmm(job->cli)->base,
+ .flags = DRM_EXEC_IGNORE_DUPLICATES,
+ .num_fences = 1,
+ };
int ret;
ret = nouveau_job_add_deps(job);
@@ -276,46 +293,29 @@ nouveau_job_submit(struct nouveau_job *job)
/* Make sure the job appears on the sched_entity's queue in the same
* order as it was submitted.
*/
- mutex_lock(&entity->mutex);
+ mutex_lock(&sched->mutex);
/* Guarantee we won't fail after the submit() callback returned
* successfully.
*/
if (job->ops->submit) {
- ret = job->ops->submit(job);
+ ret = job->ops->submit(job, &vm_exec);
if (ret)
goto err_cleanup;
}
+ /* Submit was successful; add the job to the scheduler's job list. */
+ spin_lock(&sched->job.list.lock);
+ list_add(&job->entry, &sched->job.list.head);
+ spin_unlock(&sched->job.list.lock);
+
drm_sched_job_arm(&job->base);
job->done_fence = dma_fence_get(&job->base.s_fence->finished);
if (job->sync)
done_fence = dma_fence_get(job->done_fence);
- /* If a sched job depends on a dma-fence from a job from the same GPU
- * scheduler instance, but a different scheduler entity, the GPU
- * scheduler does only wait for the particular job to be scheduled,
- * rather than for the job to fully complete. This is due to the GPU
- * scheduler assuming that there is a scheduler instance per ring.
- * However, the current implementation, in order to avoid arbitrary
- * amounts of kthreads, has a single scheduler instance while scheduler
- * entities represent rings.
- *
- * As a workaround, set the DRM_SCHED_FENCE_DONT_PIPELINE for all
- * out-fences in order to force the scheduler to wait for full job
- * completion for dependent jobs from different entities and same
- * scheduler instance.
- *
- * There is some work in progress [1] to address the issues of firmware
- * schedulers; once it is in-tree the scheduler topology in Nouveau
- * should be re-worked accordingly.
- *
- * [1] https://lore.kernel.org/dri-devel/[email protected]/
- */
- set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &job->done_fence->flags);
-
if (job->ops->armed_submit)
- job->ops->armed_submit(job);
+ job->ops->armed_submit(job, &vm_exec);
nouveau_job_fence_attach(job);
@@ -326,7 +326,7 @@ nouveau_job_submit(struct nouveau_job *job)
drm_sched_entity_push_job(&job->base);
- mutex_unlock(&entity->mutex);
+ mutex_unlock(&sched->mutex);
if (done_fence) {
dma_fence_wait(done_fence, true);
@@ -336,20 +336,13 @@ nouveau_job_submit(struct nouveau_job *job)
return 0;
err_cleanup:
- mutex_unlock(&entity->mutex);
+ mutex_unlock(&sched->mutex);
nouveau_job_fence_attach_cleanup(job);
err:
job->state = NOUVEAU_JOB_SUBMIT_FAILED;
return ret;
}
-bool
-nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity,
- struct work_struct *work)
-{
- return queue_work(entity->sched_wq, work);
-}
-
static struct dma_fence *
nouveau_job_run(struct nouveau_job *job)
{
@@ -399,50 +392,82 @@ nouveau_sched_free_job(struct drm_sched_job *sched_job)
nouveau_job_fini(job);
}
-int nouveau_sched_entity_init(struct nouveau_sched_entity *entity,
- struct drm_gpu_scheduler *sched,
- struct workqueue_struct *sched_wq)
-{
- mutex_init(&entity->mutex);
- spin_lock_init(&entity->job.list.lock);
- INIT_LIST_HEAD(&entity->job.list.head);
- init_waitqueue_head(&entity->job.wq);
-
- entity->sched_wq = sched_wq;
- return drm_sched_entity_init(&entity->base,
- DRM_SCHED_PRIORITY_NORMAL,
- &sched, 1, NULL);
-}
-
-void
-nouveau_sched_entity_fini(struct nouveau_sched_entity *entity)
-{
- drm_sched_entity_destroy(&entity->base);
-}
-
static const struct drm_sched_backend_ops nouveau_sched_ops = {
.run_job = nouveau_sched_run_job,
.timedout_job = nouveau_sched_timedout_job,
.free_job = nouveau_sched_free_job,
};
-int nouveau_sched_init(struct nouveau_drm *drm)
+int
+nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
+ struct workqueue_struct *wq, u32 credit_limit)
{
- struct drm_gpu_scheduler *sched = &drm->sched;
+ struct drm_gpu_scheduler *drm_sched = &sched->base;
+ struct drm_sched_entity *entity = &sched->entity;
long job_hang_limit = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS);
+ int ret;
- drm->sched_wq = create_singlethread_workqueue("nouveau_sched_wq");
- if (!drm->sched_wq)
- return -ENOMEM;
+ if (!wq) {
+ wq = alloc_workqueue("nouveau_sched_wq_%d", 0, WQ_MAX_ACTIVE,
+ current->pid);
+ if (!wq)
+ return -ENOMEM;
+
+ sched->wq = wq;
+ }
- return drm_sched_init(sched, &nouveau_sched_ops, NULL,
- DRM_SCHED_PRIORITY_COUNT,
- NOUVEAU_SCHED_HW_SUBMISSIONS, 0, job_hang_limit,
- NULL, NULL, "nouveau_sched", drm->dev->dev);
+ ret = drm_sched_init(drm_sched, &nouveau_sched_ops, wq,
+ NOUVEAU_SCHED_PRIORITY_COUNT,
+ credit_limit, 0, job_hang_limit,
+ NULL, NULL, "nouveau_sched", drm->dev->dev);
+ if (ret)
+ goto fail_wq;
+
+ /* Using DRM_SCHED_PRIORITY_KERNEL, since that's what we're required to use
+ * when we want to have a single run-queue only.
+ *
+ * It's not documented, but using any other priority runs into faults,
+ * because the scheduler uses the priority as an index into the run-queue
+ * array.
+ *
+ * Can't use NOUVEAU_SCHED_PRIORITY_SINGLE either, because it doesn't
+ * match the enum type used in drm_sched_entity_init().
+ */
+ ret = drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_KERNEL,
+ &drm_sched, 1, NULL);
+ if (ret)
+ goto fail_sched;
+
+ mutex_init(&sched->mutex);
+ spin_lock_init(&sched->job.list.lock);
+ INIT_LIST_HEAD(&sched->job.list.head);
+ init_waitqueue_head(&sched->job.wq);
+
+ return 0;
+
+fail_sched:
+ drm_sched_fini(drm_sched);
+fail_wq:
+ if (sched->wq)
+ destroy_workqueue(sched->wq);
+ return ret;
}
-void nouveau_sched_fini(struct nouveau_drm *drm)
+void
+nouveau_sched_fini(struct nouveau_sched *sched)
{
- destroy_workqueue(drm->sched_wq);
- drm_sched_fini(&drm->sched);
+ struct drm_gpu_scheduler *drm_sched = &sched->base;
+ struct drm_sched_entity *entity = &sched->entity;
+
+ rmb(); /* for list_empty to work without lock */
+ wait_event(sched->job.wq, list_empty(&sched->job.list.head));
+
+ drm_sched_entity_fini(entity);
+ drm_sched_fini(drm_sched);
+
+ /* Destroy workqueue after scheduler tear down, otherwise it might still
+ * be in use.
+ */
+ if (sched->wq)
+ destroy_workqueue(sched->wq);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.h b/drivers/gpu/drm/nouveau/nouveau_sched.h
index 27ac19792597..a6528f5981e6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.h
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.h
@@ -5,7 +5,7 @@
#include <linux/types.h>
-#include <drm/drm_exec.h>
+#include <drm/drm_gpuvm.h>
#include <drm/gpu_scheduler.h>
#include "nouveau_drv.h"
@@ -26,7 +26,8 @@ enum nouveau_job_state {
struct nouveau_job_args {
struct drm_file *file_priv;
- struct nouveau_sched_entity *sched_entity;
+ struct nouveau_sched *sched;
+ u32 credits;
enum dma_resv_usage resv_usage;
bool sync;
@@ -49,12 +50,12 @@ struct nouveau_job {
enum nouveau_job_state state;
- struct nouveau_sched_entity *entity;
+ struct nouveau_sched *sched;
+ struct list_head entry;
struct drm_file *file_priv;
struct nouveau_cli *cli;
- struct drm_exec exec;
enum dma_resv_usage resv_usage;
struct dma_fence *done_fence;
@@ -76,8 +77,8 @@ struct nouveau_job {
/* If .submit() returns without any error, it is guaranteed that
* armed_submit() is called.
*/
- int (*submit)(struct nouveau_job *);
- void (*armed_submit)(struct nouveau_job *);
+ int (*submit)(struct nouveau_job *, struct drm_gpuvm_exec *);
+ void (*armed_submit)(struct nouveau_job *, struct drm_gpuvm_exec *);
struct dma_fence *(*run)(struct nouveau_job *);
void (*free)(struct nouveau_job *);
enum drm_gpu_sched_stat (*timeout)(struct nouveau_job *);
@@ -90,20 +91,17 @@ int nouveau_job_ucopy_syncs(struct nouveau_job_args *args,
int nouveau_job_init(struct nouveau_job *job,
struct nouveau_job_args *args);
-void nouveau_job_free(struct nouveau_job *job);
-
-int nouveau_job_submit(struct nouveau_job *job);
void nouveau_job_fini(struct nouveau_job *job);
+int nouveau_job_submit(struct nouveau_job *job);
+void nouveau_job_done(struct nouveau_job *job);
+void nouveau_job_free(struct nouveau_job *job);
-#define to_nouveau_sched_entity(entity) \
- container_of((entity), struct nouveau_sched_entity, base)
-
-struct nouveau_sched_entity {
- struct drm_sched_entity base;
+struct nouveau_sched {
+ struct drm_gpu_scheduler base;
+ struct drm_sched_entity entity;
+ struct workqueue_struct *wq;
struct mutex mutex;
- struct workqueue_struct *sched_wq;
-
struct {
struct {
struct list_head head;
@@ -113,15 +111,8 @@ struct nouveau_sched_entity {
} job;
};
-int nouveau_sched_entity_init(struct nouveau_sched_entity *entity,
- struct drm_gpu_scheduler *sched,
- struct workqueue_struct *sched_wq);
-void nouveau_sched_entity_fini(struct nouveau_sched_entity *entity);
-
-bool nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity,
- struct work_struct *work);
-
-int nouveau_sched_init(struct nouveau_drm *drm);
-void nouveau_sched_fini(struct nouveau_drm *drm);
+int nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
+ struct workqueue_struct *wq, u32 credit_limit);
+void nouveau_sched_fini(struct nouveau_sched *sched);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index eda7bb8624f1..dae3baf707a0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -438,8 +438,9 @@ nouveau_uvma_region_complete(struct nouveau_uvma_region *reg)
static void
op_map_prepare_unwind(struct nouveau_uvma *uvma)
{
+ struct drm_gpuva *va = &uvma->va;
nouveau_uvma_gem_put(uvma);
- drm_gpuva_remove(&uvma->va);
+ drm_gpuva_remove(va);
nouveau_uvma_free(uvma);
}
@@ -468,6 +469,7 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
break;
case DRM_GPUVA_OP_REMAP: {
struct drm_gpuva_op_remap *r = &op->remap;
+ struct drm_gpuva *va = r->unmap->va;
if (r->next)
op_map_prepare_unwind(new->next);
@@ -475,7 +477,7 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
if (r->prev)
op_map_prepare_unwind(new->prev);
- op_unmap_prepare_unwind(r->unmap->va);
+ op_unmap_prepare_unwind(va);
break;
}
case DRM_GPUVA_OP_UNMAP:
@@ -606,6 +608,9 @@ op_unmap_prepare(struct drm_gpuva_op_unmap *u)
drm_gpuva_unmap(u);
}
+/*
+ * Note: @args should not be NULL when calling for a map operation.
+ */
static int
nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
struct nouveau_uvma_prealloc *new,
@@ -626,7 +631,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
if (ret)
goto unwind;
- if (args && vmm_get_range) {
+ if (vmm_get_range) {
ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
vmm_get_range);
if (ret) {
@@ -634,6 +639,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
goto unwind;
}
}
+
break;
}
case DRM_GPUVA_OP_REMAP: {
@@ -960,6 +966,12 @@ nouveau_uvmm_bind_job_free(struct kref *kref)
{
struct nouveau_uvmm_bind_job *job =
container_of(kref, struct nouveau_uvmm_bind_job, kref);
+ struct bind_job_op *op, *next;
+
+ list_for_each_op_safe(op, next, &job->ops) {
+ list_del(&op->entry);
+ kfree(op);
+ }
nouveau_job_free(&job->base);
kfree(job);
@@ -1001,14 +1013,16 @@ bind_validate_op(struct nouveau_job *job,
static void
bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range)
{
- struct nouveau_uvmm_bind_job *bind_job;
- struct nouveau_sched_entity *entity = job->entity;
+ struct nouveau_sched *sched = job->sched;
+ struct nouveau_job *__job;
struct bind_job_op *op;
u64 end = addr + range;
again:
- spin_lock(&entity->job.list.lock);
- list_for_each_entry(bind_job, &entity->job.list.head, entry) {
+ spin_lock(&sched->job.list.lock);
+ list_for_each_entry(__job, &sched->job.list.head, entry) {
+ struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(__job);
+
list_for_each_op(op, &bind_job->ops) {
if (op->op == OP_UNMAP) {
u64 op_addr = op->va.addr;
@@ -1016,7 +1030,7 @@ again:
if (!(end <= op_addr || addr >= op_end)) {
nouveau_uvmm_bind_job_get(bind_job);
- spin_unlock(&entity->job.list.lock);
+ spin_unlock(&sched->job.list.lock);
wait_for_completion(&bind_job->complete);
nouveau_uvmm_bind_job_put(bind_job);
goto again;
@@ -1024,7 +1038,7 @@ again:
}
}
}
- spin_unlock(&entity->job.list.lock);
+ spin_unlock(&sched->job.list.lock);
}
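
The wait-and-retry dance above follows a common pattern: take a reference under the lock, drop the lock before sleeping, then restart the scan because the list may have changed in the meantime. A generic fragment of that pattern — conflicts(), get_ref(), put_ref() and the list/lock names are hypothetical stand-ins, not driver API:

	again:
		spin_lock(&job_list_lock);
		list_for_each_entry(item, &job_list, entry) {
			if (conflicts(item)) {
				get_ref(item);			/* keep item alive */
				spin_unlock(&job_list_lock);	/* never sleep with the lock held */
				wait_for_completion(&item->done);
				put_ref(item);
				goto again;			/* list may have changed */
			}
		}
		spin_unlock(&job_list_lock);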
static int
@@ -1135,12 +1149,52 @@ bind_link_gpuvas(struct bind_job_op *bop)
}
static int
-nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
+bind_lock_validate(struct nouveau_job *job, struct drm_exec *exec,
+ unsigned int num_fences)
+{
+ struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
+ struct bind_job_op *op;
+ int ret;
+
+ list_for_each_op(op, &bind_job->ops) {
+ struct drm_gpuva_op *va_op;
+
+ if (!op->ops)
+ continue;
+
+ drm_gpuva_for_each_op(va_op, op->ops) {
+ struct drm_gem_object *obj = op_gem_obj(va_op);
+
+ if (unlikely(!obj))
+ continue;
+
+ ret = drm_exec_prepare_obj(exec, obj, num_fences);
+ if (ret)
+ return ret;
+
+ /* Don't validate GEMs backing mappings we're about to
+ * unmap; it's not worth the effort.
+ */
+ if (va_op->op == DRM_GPUVA_OP_UNMAP)
+ continue;
+
+ ret = nouveau_bo_validate(nouveau_gem_object(obj),
+ true, false);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
+nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
+ struct drm_gpuvm_exec *vme)
{
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
- struct nouveau_sched_entity *entity = job->entity;
- struct drm_exec *exec = &job->exec;
+ struct drm_exec *exec = &vme->exec;
struct bind_job_op *op;
int ret;
@@ -1157,6 +1211,8 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
dma_resv_unlock(obj->resv);
if (IS_ERR(op->vm_bo))
return PTR_ERR(op->vm_bo);
+
+ drm_gpuvm_bo_extobj_add(op->vm_bo);
}
ret = bind_validate_op(job, op);
@@ -1179,6 +1235,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
* unwind all GPU VA space changes on failure.
*/
nouveau_uvmm_lock(uvmm);
+
list_for_each_op(op, &bind_job->ops) {
switch (op->op) {
case OP_MAP_SPARSE:
@@ -1290,55 +1347,13 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
}
}
- drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
- DRM_EXEC_IGNORE_DUPLICATES);
+ drm_exec_init(exec, vme->flags);
drm_exec_until_all_locked(exec) {
- list_for_each_op(op, &bind_job->ops) {
- struct drm_gpuva_op *va_op;
-
- if (IS_ERR_OR_NULL(op->ops))
- continue;
-
- drm_gpuva_for_each_op(va_op, op->ops) {
- struct drm_gem_object *obj = op_gem_obj(va_op);
-
- if (unlikely(!obj))
- continue;
-
- ret = drm_exec_prepare_obj(exec, obj, 1);
- drm_exec_retry_on_contention(exec);
- if (ret) {
- op = list_last_op(&bind_job->ops);
- goto unwind;
- }
- }
- }
- }
-
- list_for_each_op(op, &bind_job->ops) {
- struct drm_gpuva_op *va_op;
-
- if (IS_ERR_OR_NULL(op->ops))
- continue;
-
- drm_gpuva_for_each_op(va_op, op->ops) {
- struct drm_gem_object *obj = op_gem_obj(va_op);
-
- if (unlikely(!obj))
- continue;
-
- /* Don't validate GEMs backing mappings we're about to
- * unmap, it's not worth the effort.
- */
- if (unlikely(va_op->op == DRM_GPUVA_OP_UNMAP))
- continue;
-
- ret = nouveau_bo_validate(nouveau_gem_object(obj),
- true, false);
- if (ret) {
- op = list_last_op(&bind_job->ops);
- goto unwind;
- }
+ ret = bind_lock_validate(job, exec, vme->num_fences);
+ drm_exec_retry_on_contention(exec);
+ if (ret) {
+ op = list_last_op(&bind_job->ops);
+ goto unwind;
}
}
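
For reference, the general shape of a drm_exec locking loop as used above: everything inside the block must be safe to re-run, because drm_exec_retry_on_contention() drops all locks taken so far and restarts the block after a ww-mutex contention. A minimal single-object fragment (obj assumed to be a locked-candidate GEM object), using the same two-argument drm_exec_init() form as this patch:

	struct drm_exec exec;
	int ret;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		ret = drm_exec_prepare_obj(&exec, obj, 1);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;
	}
	/* ... use the locked object, then ... */
	drm_exec_fini(&exec);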
@@ -1375,10 +1390,6 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
}
nouveau_uvmm_unlock(uvmm);
- spin_lock(&entity->job.list.lock);
- list_add(&bind_job->entry, &entity->job.list.head);
- spin_unlock(&entity->job.list.lock);
-
return 0;
unwind_continue:
@@ -1413,21 +1424,17 @@ unwind:
}
nouveau_uvmm_unlock(uvmm);
- drm_exec_fini(exec);
+ drm_gpuvm_exec_unlock(vme);
return ret;
}
static void
-nouveau_uvmm_bind_job_armed_submit(struct nouveau_job *job)
+nouveau_uvmm_bind_job_armed_submit(struct nouveau_job *job,
+ struct drm_gpuvm_exec *vme)
{
- struct drm_exec *exec = &job->exec;
- struct drm_gem_object *obj;
- unsigned long index;
-
- drm_exec_for_each_locked_object(exec, index, obj)
- dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage);
-
- drm_exec_fini(exec);
+ drm_gpuvm_exec_resv_add_fence(vme, job->done_fence,
+ job->resv_usage, job->resv_usage);
+ drm_gpuvm_exec_unlock(vme);
}
static struct dma_fence *
@@ -1465,14 +1472,11 @@ out:
}
static void
-nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work)
+nouveau_uvmm_bind_job_cleanup(struct nouveau_job *job)
{
- struct nouveau_uvmm_bind_job *bind_job =
- container_of(work, struct nouveau_uvmm_bind_job, work);
- struct nouveau_job *job = &bind_job->base;
+ struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
- struct nouveau_sched_entity *entity = job->entity;
- struct bind_job_op *op, *next;
+ struct bind_job_op *op;
list_for_each_op(op, &bind_job->ops) {
struct drm_gem_object *obj = op->gem.obj;
@@ -1524,38 +1528,17 @@ nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work)
drm_gem_object_put(obj);
}
- spin_lock(&entity->job.list.lock);
- list_del(&bind_job->entry);
- spin_unlock(&entity->job.list.lock);
-
+ nouveau_job_done(job);
complete_all(&bind_job->complete);
- wake_up(&entity->job.wq);
-
- /* Remove and free ops after removing the bind job from the job list to
- * avoid races against bind_validate_map_sparse().
- */
- list_for_each_op_safe(op, next, &bind_job->ops) {
- list_del(&op->entry);
- kfree(op);
- }
nouveau_uvmm_bind_job_put(bind_job);
}
-static void
-nouveau_uvmm_bind_job_free_qwork(struct nouveau_job *job)
-{
- struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
- struct nouveau_sched_entity *entity = job->entity;
-
- nouveau_sched_entity_qwork(entity, &bind_job->work);
-}
-
static struct nouveau_job_ops nouveau_bind_job_ops = {
.submit = nouveau_uvmm_bind_job_submit,
.armed_submit = nouveau_uvmm_bind_job_armed_submit,
.run = nouveau_uvmm_bind_job_run,
- .free = nouveau_uvmm_bind_job_free_qwork,
+ .free = nouveau_uvmm_bind_job_cleanup,
};
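
As the header comment earlier in nouveau_sched.h notes, armed_submit() is guaranteed to run iff submit() returned without error. A simplified sketch of the caller-side contract, inferred from that comment and from the error paths above (the submit path unlocks the gpuvm_exec itself on failure):

	/* Simplified contract sketch; not the literal core code. */
	ret = job->ops->submit(job, &vme);	/* locks objects via vme */
	if (ret)
		return ret;			/* vme already unlocked on error */
	/* ... arm the job, install job->done_fence ... */
	job->ops->armed_submit(job, &vme);	/* adds fences, unlocks vme */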
static int
@@ -1616,7 +1599,6 @@ nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob,
return ret;
INIT_LIST_HEAD(&job->ops);
- INIT_LIST_HEAD(&job->entry);
for (i = 0; i < __args->op.count; i++) {
ret = bind_job_op_from_uop(&op, &__args->op.s[i]);
@@ -1627,11 +1609,12 @@ nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob,
}
init_completion(&job->complete);
- INIT_WORK(&job->work, nouveau_uvmm_bind_job_free_work_fn);
- args.sched_entity = __args->sched_entity;
args.file_priv = __args->file_priv;
+ args.sched = __args->sched;
+ args.credits = 1;
+
args.in_sync.count = __args->in_sync.count;
args.in_sync.s = __args->in_sync.s;
@@ -1757,7 +1740,7 @@ nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev,
if (ret)
return ret;
- args.sched_entity = &cli->sched_entity;
+ args.sched = &cli->sched;
args.file_priv = file_priv;
ret = nouveau_uvmm_vm_bind(&args);
@@ -1815,8 +1798,17 @@ nouveau_uvmm_free(struct drm_gpuvm *gpuvm)
kfree(uvmm);
}
+static int
+nouveau_uvmm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
+{
+ struct nouveau_bo *nvbo = nouveau_gem_object(vm_bo->obj);
+
+ return nouveau_bo_validate(nvbo, true, false);
+}
+
static const struct drm_gpuvm_ops gpuvm_ops = {
.vm_free = nouveau_uvmm_free,
+ .vm_bo_validate = nouveau_uvmm_bo_validate,
};
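
With .vm_bo_validate wired up, the GPUVM core can revalidate every evicted BO of the VM in one pass. A hedged sketch of a hypothetical caller, assuming the drm_gpuvm_validate() helper is available in this tree:

	/* Revalidate all evicted mappings of the VM in one call. */
	ret = drm_gpuvm_validate(&uvmm->base, &vme->exec);
	if (ret)
		return ret;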
int
@@ -1900,12 +1892,8 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
MA_STATE(mas, &uvmm->region_mt, 0, 0);
struct nouveau_uvma_region *reg;
struct nouveau_cli *cli = uvmm->vmm.cli;
- struct nouveau_sched_entity *entity = &cli->sched_entity;
struct drm_gpuva *va, *next;
- rmb(); /* for list_empty to work without lock */
- wait_event(entity->job.wq, list_empty(&entity->job.list.head));
-
nouveau_uvmm_lock(uvmm);
drm_gpuvm_for_each_va_safe(va, next, &uvmm->base) {
struct nouveau_uvma *uvma = uvma_from_va(va);
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.h b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
index f0a6d98ace4f..9d3c348581eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
@@ -44,8 +44,6 @@ struct nouveau_uvmm_bind_job {
struct nouveau_job base;
struct kref kref;
- struct list_head entry;
- struct work_struct work;
struct completion complete;
/* struct bind_job_op */
@@ -54,7 +52,7 @@ struct nouveau_uvmm_bind_job {
struct nouveau_uvmm_bind_job_args {
struct drm_file *file_priv;
- struct nouveau_sched_entity *sched_entity;
+ struct nouveau_sched *sched;
unsigned int flags;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index dc44f5c7833f..44fb86841c05 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -365,10 +365,8 @@ r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
}
ret = r535_gsp_cmdq_push(gsp, rpc);
- if (ret) {
- mutex_unlock(&gsp->cmdq.mutex);
+ if (ret)
return ERR_PTR(ret);
- }
if (wait) {
msg = r535_gsp_msg_recv(gsp, fn, repc);
@@ -1048,7 +1046,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
char *strings;
int str_offset;
int i;
- size_t rpc_size = sizeof(*rpc) + sizeof(rpc->entries[0]) * NV_GSP_REG_NUM_ENTRIES;
+ size_t rpc_size = struct_size(rpc, entries, NV_GSP_REG_NUM_ENTRIES);
/* add strings + null terminator */
for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++)
@@ -1379,6 +1377,13 @@ r535_gsp_msg_post_event(void *priv, u32 fn, void *repv, u32 repc)
return 0;
}
+/**
+ * r535_gsp_msg_run_cpu_sequencer() - process I/O commands from the GSP
+ *
+ * The GSP sequencer is a list of I/O commands that the GSP can ask the
+ * driver to perform for various purposes. The most common usage is to
+ * perform a special mid-initialization reset.
+ */
static int
r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc)
{
@@ -1718,6 +1723,23 @@ r535_gsp_libos_id8(const char *name)
return id;
}
+/**
+ * create_pte_array() - creates a PTE array of a physically contiguous buffer
+ * @ptes: pointer to the array
+ * @addr: base address of physically contiguous buffer (GSP_PAGE_SIZE aligned)
+ * @size: size of the buffer
+ *
+ * GSP-RM sometimes expects physically-contiguous buffers to have an array of
+ * "PTEs" for each page in that buffer. Although in theory that allows for
+ * the buffer to be physically discontiguous, GSP-RM does not currently
+ * support that.
+ *
+ * In this case, the PTEs are DMA addresses of each page of the buffer. Since
+ * the buffer is physically contiguous, calculating all the PTEs is simple
+ * math.
+ *
+ * See memdescGetPhysAddrsForGpu()
+ */
static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
{
unsigned int num_pages = DIV_ROUND_UP_ULL(size, GSP_PAGE_SIZE);
@@ -1727,6 +1749,35 @@ static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
ptes[i] = (u64)addr + (i << GSP_PAGE_SHIFT);
}
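
A usage sketch for the helper above — allocating and filling a PTE array for a contiguous buffer; buf_dma_addr and buf_size are hypothetical names for illustration:

	/* One PTE (DMA address) per 4K GSP page of the buffer. */
	unsigned int npages = DIV_ROUND_UP_ULL(buf_size, GSP_PAGE_SIZE);
	u64 *ptes = kcalloc(npages, sizeof(*ptes), GFP_KERNEL);

	if (ptes)
		create_pte_array(ptes, buf_dma_addr, buf_size);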
+/**
+ * r535_gsp_libos_init() - create the libos arguments structure
+ *
+ * The logging buffers are byte queues that contain encoded printf-like
+ * messages from GSP-RM. They need to be decoded by a special application
+ * that can parse the buffers.
+ *
+ * The 'loginit' buffer contains logs from early GSP-RM init and
+ * exception dumps. The 'logrm' buffer contains the subsequent logs. Both are
+ * written to directly by GSP-RM and can be any multiple of GSP_PAGE_SIZE.
+ *
+ * The physical address map for the log buffer is stored in the buffer
+ * itself, starting with offset 1. Offset 0 contains the "put" pointer.
+ *
+ * The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is
+ * configured for a larger page size (e.g. 64K pages), we need to give
+ * the GSP an array of 4K pages. Fortunately, since the buffer is
+ * physically contiguous, it's simple math to calculate the addresses.
+ *
+ * The buffers must be a multiple of GSP_PAGE_SIZE. GSP-RM also currently
+ * ignores the @kind field for LOGINIT, LOGINTR, and LOGRM, but expects the
+ * buffers to be physically contiguous anyway.
+ *
+ * The memory allocated for the arguments must remain in place until the
+ * GSP sends the init_done RPC.
+ *
+ * See _kgspInitLibosLoggingStructures (allocates memory for buffers)
+ * See kgspSetupLibosInitArgs_IMPL (creates pLibosInitArgs[] array)
+ */
static int
r535_gsp_libos_init(struct nvkm_gsp *gsp)
{
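
Based on the layout described in the comment above, filling a log buffer's physical address map might look like the following — a sketch under stated assumptions (u64 slots, slot 0 holding the "put" pointer, the page map starting at slot 1); the buffer names are hypothetical:

	u64 *map = log_buf_cpu_addr;		/* hypothetical CPU mapping */

	map[0] = 0;				/* "put" pointer slot */
	create_pte_array(&map[1], log_buf_dma_addr, log_buf_size);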
@@ -1837,6 +1888,35 @@ nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
nvkm_gsp_mem_dtor(gsp, &rx3->mem[i]);
}
+/**
+ * nvkm_gsp_radix3_sg() - build a radix3 table from an S/G list
+ *
+ * The GSP uses a three-level page table, called radix3, to map the firmware.
+ * Each 64-bit "pointer" in the table is either the bus address of an entry in
+ * the next table (for levels 0 and 1) or the bus address of the next page in
+ * the GSP firmware image itself.
+ *
+ * Level 0 contains a single entry in one page that points to the first page
+ * of level 1.
+ *
+ * Level 1, since it's also only one page in size, contains up to 512 entries,
+ * one for each page in Level 2.
+ *
+ * Level 2 can be up to 512 pages in size, and each of those entries points to
+ * the next page of the firmware image. Since there can be up to 512*512
+ * pages, that limits the size of the firmware to 512*512*GSP_PAGE_SIZE = 1GB.
+ *
+ * Internally, the GSP has its own window into system memory, but the base
+ * physical address of the aperture is not 0. In fact, it varies depending on
+ * the GPU architecture. Since the GPU is a PCI device, this window is
+ * accessed via DMA and is therefore bound by IOMMU translation. The end
+ * result is that GSP-RM must translate the bus addresses in the table to GSP
+ * physical addresses. All this should happen transparently.
+ *
+ * Return: 0 on success, or a negative error code
+ *
+ * See kgspCreateRadix3_IMPL
+ */
static int
nvkm_gsp_radix3_sg(struct nvkm_device *device, struct sg_table *sgt, u64 size,
struct nvkm_gsp_radix3 *rx3)
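
The size limit stated in the comment follows directly from the geometry: one level-1 page of 512 entries, each naming a level-2 page that itself holds 512 firmware-page pointers. A capacity sketch (the macro and function names are ours, not the driver's):

	/* 512 level-1 entries * 512 level-2 entries * 4K pages = 1 GiB. */
	#define EXAMPLE_RADIX3_MAX_FW_SIZE	(512ULL * 512 * GSP_PAGE_SIZE)

	static bool example_fw_fits_radix3(u64 fw_size)
	{
		return fw_size <= EXAMPLE_RADIX3_MAX_FW_SIZE;
	}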
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
index e34bc6076401..8379e72d77ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
@@ -31,7 +31,7 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
type |= 0x00000001; /* PAGE_ALL */
if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
- type |= 0x00000004; /* HUB_ONLY */
+ type |= 0x00000006; /* HUB_ONLY | ALL PDB (hack) */
mutex_lock(&vmm->mmu->mutex);
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index c26aab4939fa..993691b3cc7e 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -22,11 +22,11 @@
#include <linux/hardirq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/sizes.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/component.h>
#include <linux/sys_soc.h>
#include <drm/drm_fourcc.h>
@@ -4765,7 +4765,7 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
if (soc)
dispc->feat = soc->data;
else
- dispc->feat = of_match_device(dispc_of_match, &pdev->dev)->data;
+ dispc->feat = device_get_match_data(&pdev->dev);
r = dispc_errata_i734_wa_init(dispc);
if (r)
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 02955f976845..988888e164d7 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -22,12 +22,13 @@
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/gfp.h>
#include <linux/sizes.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
#include <linux/suspend.h>
@@ -1445,7 +1446,7 @@ static int dss_probe(struct platform_device *pdev)
if (soc)
dss->feat = soc->data;
else
- dss->feat = of_match_device(dss_of_match, &pdev->dev)->data;
+ dss->feat = device_get_match_data(&pdev->dev);
/* Map I/O registers, get and setup clocks. */
dss->base = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index c48fa531ca32..3421e8389222 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -48,7 +48,7 @@ struct omap_gem_object {
* OMAP_BO_MEM_DMA_API flag set)
*
* - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
- * if they are physically contiguous (when sgt->orig_nents == 1)
+ * if they are physically contiguous
*
* - buffers mapped through the TILER when pin_cnt is not zero, in which
* case the DMA address points to the TILER aperture
@@ -148,12 +148,18 @@ u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
return drm_vma_node_offset_addr(&obj->vma_node);
}
+static bool omap_gem_sgt_is_contiguous(struct sg_table *sgt, size_t size)
+{
+ return drm_prime_get_contiguous_size(sgt) >= size;
+}
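
The switch from checking sgt->orig_nents == 1 to measuring the contiguous DMA size matters behind an IOMMU, where a multi-entry table can still map to a single contiguous DMA range. A hedged illustration (sgt and size assumed from the surrounding code):

	/* A 3-entry sgt remapped contiguously by an IOMMU:
	 * the old check rejected it, the new one accepts it. */
	bool old_ok = (sgt->orig_nents == 1);			/* false */
	bool new_ok = omap_gem_sgt_is_contiguous(sgt, size);	/* true if the
								   DMA run covers size */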
+
static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
{
if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
return true;
- if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
+ if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) &&
+ omap_gem_sgt_is_contiguous(omap_obj->sgt, omap_obj->base.size))
return true;
return false;
@@ -1385,7 +1391,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
union omap_gem_size gsize;
/* Without a DMM only physically contiguous buffers can be supported. */
- if (sgt->orig_nents != 1 && !priv->has_dmm)
+ if (!omap_gem_sgt_is_contiguous(sgt, size) && !priv->has_dmm)
return ERR_PTR(-EINVAL);
gsize.bytes = PAGE_ALIGN(size);
@@ -1399,7 +1405,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
omap_obj->sgt = sgt;
- if (sgt->orig_nents == 1) {
+ if (omap_gem_sgt_is_contiguous(sgt, size)) {
omap_obj->dma_addr = sg_dma_address(sgt->sgl);
} else {
/* Create pages list from sgt */
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index be8f48e3c1db..c4c0f08e9202 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -1764,6 +1764,7 @@ static const struct panel_desc starry_qfh032011_53g_desc = {
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_LPM,
.init_cmds = starry_qfh032011_53g_init_cmd,
+ .lp11_before_reset = true,
};
static const struct drm_display_mode starry_himax83102_j02_default_mode = {
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index f22677373171..c76f186c4baa 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -203,6 +203,9 @@ struct edp_panel_entry {
/** @name: Name of this panel (for printing to logs). */
const char *name;
+
+ /** @override_edid_mode: Override the mode obtained from EDID. */
+ const struct drm_display_mode *override_edid_mode;
};
struct panel_edp {
@@ -301,6 +304,24 @@ static unsigned int panel_edp_get_display_modes(struct panel_edp *panel,
return num;
}
+static int panel_edp_override_edid_mode(struct panel_edp *panel,
+ struct drm_connector *connector,
+ const struct drm_display_mode *override_mode)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, override_mode);
+ if (!mode) {
+ dev_err(panel->base.dev, "failed to add additional mode\n");
+ return 0;
+ }
+
+ mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+ return 1;
+}
+
static int panel_edp_get_non_edid_modes(struct panel_edp *panel,
struct drm_connector *connector)
{
@@ -568,6 +589,10 @@ static int panel_edp_get_modes(struct drm_panel *panel,
{
struct panel_edp *p = to_panel_edp(panel);
int num = 0;
+ bool has_hard_coded_modes = p->desc->num_timings || p->desc->num_modes;
+ bool has_override_edid_mode = p->detected_panel &&
+ p->detected_panel != ERR_PTR(-EINVAL) &&
+ p->detected_panel->override_edid_mode;
/* probe EDID if a DDC bus is available */
if (p->ddc) {
@@ -575,20 +600,28 @@ static int panel_edp_get_modes(struct drm_panel *panel,
if (!p->edid)
p->edid = drm_get_edid(connector, p->ddc);
-
- if (p->edid)
- num += drm_add_edid_modes(connector, p->edid);
+ /*
+ * If both EDID and hard-coded modes exist, skip the EDID modes to
+ * avoid multiple preferred modes.
+ */
+ if (p->edid && !has_hard_coded_modes) {
+ if (has_override_edid_mode) {
+ /*
+ * An override_edid_mode is specified; use it
+ * instead of the modes from EDID.
+ */
+ num += panel_edp_override_edid_mode(p, connector,
+ p->detected_panel->override_edid_mode);
+ } else {
+ num += drm_add_edid_modes(connector, p->edid);
+ }
+ }
pm_runtime_mark_last_busy(panel->dev);
pm_runtime_put_autosuspend(panel->dev);
}
- /*
- * Add hard-coded panel modes. Don't call this if there are no timings
- * and no modes (the generic edp-panel case) because it will clobber
- * the display_info that was already set by drm_add_edid_modes().
- */
- if (p->desc->num_timings || p->desc->num_modes)
+ if (has_hard_coded_modes)
num += panel_edp_get_non_edid_modes(p, connector);
else if (!num)
dev_warn(p->base.dev, "No display modes\n");
@@ -950,6 +983,19 @@ static const struct panel_desc auo_b101ean01 = {
},
};
+static const struct drm_display_mode auo_b116xa3_mode = {
+ .clock = 70589,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 40,
+ .hsync_end = 1366 + 40 + 40,
+ .htotal = 1366 + 40 + 40 + 32,
+ .vdisplay = 768,
+ .vsync_start = 768 + 10,
+ .vsync_end = 768 + 10 + 12,
+ .vtotal = 768 + 10 + 12 + 6,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
static const struct drm_display_mode auo_b116xak01_mode = {
.clock = 69300,
.hdisplay = 1366,
@@ -1849,6 +1895,15 @@ static const struct panel_delay delay_200_150_e200 = {
.delay = _delay \
}
+#define EDP_PANEL_ENTRY2(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name, _mode) \
+{ \
+ .name = _name, \
+ .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
+ product_id), \
+ .delay = _delay, \
+ .override_edid_mode = _mode \
+}
+
/*
* This table is used to figure out power sequencing delays for panels that
* are detected by EDID. Entries here may point to entries in the
@@ -1866,9 +1921,11 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x239b, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x255c, &delay_200_500_e50, "B116XTN02.5"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x403d, &delay_200_500_e50, "B140HAN04.0"),
- EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0"),
+ EDP_PANEL_ENTRY2('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0",
+ &auo_b116xa3_mode),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"),
- EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"),
+ EDP_PANEL_ENTRY2('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1",
+ &auo_b116xa3_mode),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x635c, &delay_200_500_e50, "B116XAN06.3"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x639c, &delay_200_500_e50, "B140HAK02.7"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"),
@@ -1910,9 +1967,9 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x144f, &delay_200_500_e80_d50, "N140HGA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1468, &delay_200_500_e80, "N140HGA-EA1"),
- EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"),
EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5c, &delay_200_500_e200, "MB116AN01-2"),
@@ -1926,6 +1983,8 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('K', 'D', 'C', 0x0809, &delay_200_500_e50, "KD116N2930A15"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1120, &delay_200_500_e80_d50, "116N29-30NK-C007"),
+ EDP_PANEL_ENTRY('S', 'D', 'C', 0x416d, &delay_100_500_e200, "ATNA45AF01"),
+
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1511, &delay_200_500_e50, "LQ140M1JW48"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1523, &sharp_lq140m1jw46.delay, "LQ140M1JW46"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"),
diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c
index c73243d85de7..ff0dc08b9829 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx8394.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c
@@ -38,6 +38,7 @@
#define HX8394_CMD_SETMIPI 0xba
#define HX8394_CMD_SETOTP 0xbb
#define HX8394_CMD_SETREGBANK 0xbd
+#define HX8394_CMD_UNKNOWN5 0xbf
#define HX8394_CMD_UNKNOWN1 0xc0
#define HX8394_CMD_SETDGCLUT 0xc1
#define HX8394_CMD_SETID 0xc3
@@ -52,6 +53,7 @@
#define HX8394_CMD_SETGIP1 0xd5
#define HX8394_CMD_SETGIP2 0xd6
#define HX8394_CMD_SETGPO 0xd6
+#define HX8394_CMD_UNKNOWN4 0xd8
#define HX8394_CMD_SETSCALING 0xdd
#define HX8394_CMD_SETIDLE 0xdf
#define HX8394_CMD_SETGAMMA 0xe0
@@ -68,7 +70,7 @@ struct hx8394 {
struct gpio_desc *reset_gpio;
struct regulator *vcc;
struct regulator *iovcc;
- bool prepared;
+ enum drm_panel_orientation orientation;
const struct hx8394_panel_desc *desc;
};
@@ -203,6 +205,140 @@ static const struct hx8394_panel_desc hsd060bhw4_desc = {
.init_sequence = hsd060bhw4_init_sequence,
};
+static int powkiddy_x55_init_sequence(struct hx8394 *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ /* 5.19.8 SETEXTC: Set extension command (B9h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x94);
+
+ /* 5.19.9 SETMIPI: Set MIPI control (BAh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI,
+ 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
+
+ /* 5.19.2 SETPOWER: Set power (B1h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
+ 0x48, 0x12, 0x72, 0x09, 0x32, 0x54, 0x71, 0x71, 0x57, 0x47);
+
+ /* 5.19.3 SETDISP: Set display related register (B2h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x64, 0x2c, 0x16, 0x2f);
+
+ /* 5.19.4 SETCYC: Set display waveform cycles (B4h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC,
+ 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c, 0x86, 0x75,
+ 0x00, 0x3f, 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c,
+ 0x86);
+
+ /* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM,
+ 0x6e, 0x6e);
+
+ /* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x07, 0x07, 0x40, 0x07, 0x0c, 0x00, 0x08, 0x10,
+ 0x08, 0x00, 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a, 0x02, 0x15,
+ 0x06, 0x05, 0x06, 0x47, 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
+ 0x07, 0x0c, 0x40);
+
+ /* 5.19.20 Set GIP Option1 (D5h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25, 0x18, 0x18,
+ 0x26, 0x27, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x20, 0x21,
+ 0x18, 0x18, 0x18, 0x18);
+
+ /* 5.19.21 Set GIP Option2 (D6h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02,
+ 0x01, 0x00, 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20, 0x18, 0x18,
+ 0x27, 0x26, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x25, 0x24,
+ 0x18, 0x18, 0x18, 0x18);
+
+ /* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA,
+ 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56,
+ 0x65, 0x66, 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d, 0x98, 0xa8,
+ 0xb9, 0x5d, 0x5c, 0x61, 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
+ 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56, 0x65,
+ 0x65, 0x6e, 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99, 0xa8, 0xba,
+ 0x5d, 0x5d, 0x62, 0x67, 0x6b, 0x72, 0x7f, 0x7f);
+
+ /* Unknown command, not listed in the HX8394-F datasheet */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1,
+ 0x1f, 0x31);
+
+ /* 5.19.17 SETPANEL (CCh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL,
+ 0x0b);
+
+ /* Unknown command, not listed in the HX8394-F datasheet */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
+ 0x02);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x02);
+
+ /* Unknown command, not listed in the HX8394-F datasheet */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN4,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x00);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x01);
+
+ /* 5.19.2 SETPOWER: Set power (B1h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
+ 0x00);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x00);
+
+ /* Unknown command, not listed in the HX8394-F datasheet */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN5,
+ 0x40, 0x81, 0x50, 0x00, 0x1a, 0xfc, 0x01);
+
+ /* Unknown command, not listed in the HX8394-F datasheet */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN2,
+ 0xed);
+
+ return 0;
+}
+
+static const struct drm_display_mode powkiddy_x55_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 44,
+ .hsync_end = 720 + 44 + 20,
+ .htotal = 720 + 44 + 20 + 20,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 12,
+ .vsync_end = 1280 + 12 + 10,
+ .vtotal = 1280 + 12 + 10 + 10,
+ .clock = 63290,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 67,
+ .height_mm = 121,
+};
+
+static const struct hx8394_panel_desc powkiddy_x55_desc = {
+ .mode = &powkiddy_x55_mode,
+ .lanes = 4,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init_sequence = powkiddy_x55_init_sequence,
+};
+
static int hx8394_enable(struct drm_panel *panel)
{
struct hx8394 *ctx = panel_to_hx8394(panel);
@@ -262,16 +398,11 @@ static int hx8394_unprepare(struct drm_panel *panel)
{
struct hx8394 *ctx = panel_to_hx8394(panel);
- if (!ctx->prepared)
- return 0;
-
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
regulator_disable(ctx->iovcc);
regulator_disable(ctx->vcc);
- ctx->prepared = false;
-
return 0;
}
@@ -280,9 +411,6 @@ static int hx8394_prepare(struct drm_panel *panel)
struct hx8394 *ctx = panel_to_hx8394(panel);
int ret;
- if (ctx->prepared)
- return 0;
-
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
ret = regulator_enable(ctx->vcc);
@@ -301,8 +429,6 @@ static int hx8394_prepare(struct drm_panel *panel)
msleep(180);
- ctx->prepared = true;
-
return 0;
disable_vcc:
@@ -335,12 +461,20 @@ static int hx8394_get_modes(struct drm_panel *panel,
return 1;
}
+static enum drm_panel_orientation hx8394_get_orientation(struct drm_panel *panel)
+{
+ struct hx8394 *ctx = panel_to_hx8394(panel);
+
+ return ctx->orientation;
+}
+
static const struct drm_panel_funcs hx8394_drm_funcs = {
.disable = hx8394_disable,
.unprepare = hx8394_unprepare,
.prepare = hx8394_prepare,
.enable = hx8394_enable,
.get_modes = hx8394_get_modes,
+ .get_orientation = hx8394_get_orientation,
};
static int hx8394_probe(struct mipi_dsi_device *dsi)
@@ -358,6 +492,12 @@ static int hx8394_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
"Failed to get reset gpio\n");
+ ret = of_drm_get_panel_orientation(dev->of_node, &ctx->orientation);
+ if (ret < 0) {
+ dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, ret);
+ return ret;
+ }
+
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dev = dev;
@@ -401,27 +541,11 @@ static int hx8394_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static void hx8394_shutdown(struct mipi_dsi_device *dsi)
-{
- struct hx8394 *ctx = mipi_dsi_get_drvdata(dsi);
- int ret;
-
- ret = drm_panel_disable(&ctx->panel);
- if (ret < 0)
- dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
-
- ret = drm_panel_unprepare(&ctx->panel);
- if (ret < 0)
- dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret);
-}
-
static void hx8394_remove(struct mipi_dsi_device *dsi)
{
struct hx8394 *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
- hx8394_shutdown(dsi);
-
ret = mipi_dsi_detach(dsi);
if (ret < 0)
dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
@@ -431,6 +555,7 @@ static void hx8394_remove(struct mipi_dsi_device *dsi)
static const struct of_device_id hx8394_of_match[] = {
{ .compatible = "hannstar,hsd060bhw4", .data = &hsd060bhw4_desc },
+ { .compatible = "powkiddy,x55-panel", .data = &powkiddy_x55_desc },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, hx8394_of_match);
@@ -438,7 +563,6 @@ MODULE_DEVICE_TABLE(of, hx8394_of_match);
static struct mipi_dsi_driver hx8394_driver = {
.probe = hx8394_probe,
.remove = hx8394_remove,
- .shutdown = hx8394_shutdown,
.driver = {
.name = DRV_NAME,
.of_match_table = hx8394_of_match,
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index 7838947a1bf3..2ffe5f68a890 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -830,6 +830,203 @@ static const struct ili9881c_instr w552946ab_init[] = {
ILI9881C_SWITCH_PAGE_INSTR(0),
};
+static const struct ili9881c_instr am8001280g_init[] = {
+ ILI9881C_SWITCH_PAGE_INSTR(3),
+ ILI9881C_COMMAND_INSTR(0x01, 0x00),
+ ILI9881C_COMMAND_INSTR(0x02, 0x00),
+ ILI9881C_COMMAND_INSTR(0x03, 0x73),
+ ILI9881C_COMMAND_INSTR(0x04, 0xD3),
+ ILI9881C_COMMAND_INSTR(0x05, 0x00),
+ ILI9881C_COMMAND_INSTR(0x06, 0x0A),
+ ILI9881C_COMMAND_INSTR(0x07, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x08, 0x00),
+ ILI9881C_COMMAND_INSTR(0x09, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0a, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0b, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0c, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0d, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0e, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0f, 0x01),
+ ILI9881C_COMMAND_INSTR(0x10, 0x01),
+ ILI9881C_COMMAND_INSTR(0x11, 0x00),
+ ILI9881C_COMMAND_INSTR(0x12, 0x00),
+ ILI9881C_COMMAND_INSTR(0x13, 0x00),
+ ILI9881C_COMMAND_INSTR(0x14, 0x00),
+ ILI9881C_COMMAND_INSTR(0x15, 0x00),
+ ILI9881C_COMMAND_INSTR(0x16, 0x00),
+ ILI9881C_COMMAND_INSTR(0x17, 0x00),
+ ILI9881C_COMMAND_INSTR(0x18, 0x00),
+ ILI9881C_COMMAND_INSTR(0x19, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1e, 0x40),
+ ILI9881C_COMMAND_INSTR(0x1f, 0x80),
+ ILI9881C_COMMAND_INSTR(0x20, 0x06),
+ ILI9881C_COMMAND_INSTR(0x21, 0x01),
+ ILI9881C_COMMAND_INSTR(0x22, 0x00),
+ ILI9881C_COMMAND_INSTR(0x23, 0x00),
+ ILI9881C_COMMAND_INSTR(0x24, 0x00),
+ ILI9881C_COMMAND_INSTR(0x25, 0x00),
+ ILI9881C_COMMAND_INSTR(0x26, 0x00),
+ ILI9881C_COMMAND_INSTR(0x27, 0x00),
+ ILI9881C_COMMAND_INSTR(0x28, 0x33),
+ ILI9881C_COMMAND_INSTR(0x29, 0x03),
+ ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x30, 0x00),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x32, 0x00),
+ ILI9881C_COMMAND_INSTR(0x33, 0x00),
+ ILI9881C_COMMAND_INSTR(0x34, 0x03),
+ ILI9881C_COMMAND_INSTR(0x35, 0x00),
+ ILI9881C_COMMAND_INSTR(0x36, 0x03),
+ ILI9881C_COMMAND_INSTR(0x37, 0x00),
+ ILI9881C_COMMAND_INSTR(0x38, 0x00),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x40),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x40),
+ ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x40, 0x00),
+ ILI9881C_COMMAND_INSTR(0x41, 0x00),
+ ILI9881C_COMMAND_INSTR(0x42, 0x00),
+ ILI9881C_COMMAND_INSTR(0x43, 0x00),
+ ILI9881C_COMMAND_INSTR(0x44, 0x00),
+
+ ILI9881C_COMMAND_INSTR(0x50, 0x01),
+ ILI9881C_COMMAND_INSTR(0x51, 0x23),
+ ILI9881C_COMMAND_INSTR(0x52, 0x45),
+ ILI9881C_COMMAND_INSTR(0x53, 0x67),
+ ILI9881C_COMMAND_INSTR(0x54, 0x89),
+ ILI9881C_COMMAND_INSTR(0x55, 0xab),
+ ILI9881C_COMMAND_INSTR(0x56, 0x01),
+ ILI9881C_COMMAND_INSTR(0x57, 0x23),
+ ILI9881C_COMMAND_INSTR(0x58, 0x45),
+ ILI9881C_COMMAND_INSTR(0x59, 0x67),
+ ILI9881C_COMMAND_INSTR(0x5a, 0x89),
+ ILI9881C_COMMAND_INSTR(0x5b, 0xab),
+ ILI9881C_COMMAND_INSTR(0x5c, 0xcd),
+ ILI9881C_COMMAND_INSTR(0x5d, 0xef),
+
+ ILI9881C_COMMAND_INSTR(0x5e, 0x11),
+ ILI9881C_COMMAND_INSTR(0x5f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x60, 0x00),
+ ILI9881C_COMMAND_INSTR(0x61, 0x01),
+ ILI9881C_COMMAND_INSTR(0x62, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x63, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x64, 0x0F),
+ ILI9881C_COMMAND_INSTR(0x65, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x66, 0x06),
+ ILI9881C_COMMAND_INSTR(0x67, 0x07),
+ ILI9881C_COMMAND_INSTR(0x68, 0x02),
+ ILI9881C_COMMAND_INSTR(0x69, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6a, 0x08),
+ ILI9881C_COMMAND_INSTR(0x6b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6d, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x70, 0x02),
+ ILI9881C_COMMAND_INSTR(0x71, 0x02),
+ ILI9881C_COMMAND_INSTR(0x72, 0x02),
+ ILI9881C_COMMAND_INSTR(0x73, 0x02),
+ ILI9881C_COMMAND_INSTR(0x74, 0x02),
+ ILI9881C_COMMAND_INSTR(0x75, 0x02),
+ ILI9881C_COMMAND_INSTR(0x76, 0x00),
+ ILI9881C_COMMAND_INSTR(0x77, 0x01),
+ ILI9881C_COMMAND_INSTR(0x78, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x79, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x0F),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x7c, 0x06),
+ ILI9881C_COMMAND_INSTR(0x7d, 0x07),
+ ILI9881C_COMMAND_INSTR(0x7e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x80, 0x08),
+ ILI9881C_COMMAND_INSTR(0x81, 0x02),
+ ILI9881C_COMMAND_INSTR(0x82, 0x02),
+ ILI9881C_COMMAND_INSTR(0x83, 0x02),
+ ILI9881C_COMMAND_INSTR(0x84, 0x02),
+ ILI9881C_COMMAND_INSTR(0x85, 0x02),
+ ILI9881C_COMMAND_INSTR(0x86, 0x02),
+ ILI9881C_COMMAND_INSTR(0x87, 0x02),
+ ILI9881C_COMMAND_INSTR(0x88, 0x02),
+ ILI9881C_COMMAND_INSTR(0x89, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+
+ ILI9881C_SWITCH_PAGE_INSTR(4),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x15),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x30),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x33),
+ ILI9881C_COMMAND_INSTR(0x8d, 0x15),
+ ILI9881C_COMMAND_INSTR(0x3a, 0xa4),
+ ILI9881C_COMMAND_INSTR(0x87, 0xba),
+ ILI9881C_COMMAND_INSTR(0x26, 0x76),
+ ILI9881C_COMMAND_INSTR(0xb2, 0xd1),
+
+ ILI9881C_SWITCH_PAGE_INSTR(1),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0A),
+ ILI9881C_COMMAND_INSTR(0x31, 0x0B),
+ ILI9881C_COMMAND_INSTR(0x50, 0xa5),
+ ILI9881C_COMMAND_INSTR(0x51, 0xa0),
+ ILI9881C_COMMAND_INSTR(0x53, 0x70),
+ ILI9881C_COMMAND_INSTR(0x55, 0x7A),
+ ILI9881C_COMMAND_INSTR(0x60, 0x14),
+
+ ILI9881C_COMMAND_INSTR(0xA0, 0x00),
+ ILI9881C_COMMAND_INSTR(0xA1, 0x53),
+ ILI9881C_COMMAND_INSTR(0xA2, 0x50),
+ ILI9881C_COMMAND_INSTR(0xA3, 0x20),
+ ILI9881C_COMMAND_INSTR(0xA4, 0x27),
+ ILI9881C_COMMAND_INSTR(0xA5, 0x33),
+ ILI9881C_COMMAND_INSTR(0xA6, 0x25),
+ ILI9881C_COMMAND_INSTR(0xA7, 0x25),
+ ILI9881C_COMMAND_INSTR(0xA8, 0xD4),
+ ILI9881C_COMMAND_INSTR(0xA9, 0x1A),
+ ILI9881C_COMMAND_INSTR(0xAA, 0x2B),
+ ILI9881C_COMMAND_INSTR(0xAB, 0xB5),
+ ILI9881C_COMMAND_INSTR(0xAC, 0x19),
+ ILI9881C_COMMAND_INSTR(0xAD, 0x18),
+ ILI9881C_COMMAND_INSTR(0xAE, 0x53),
+ ILI9881C_COMMAND_INSTR(0xAF, 0x1A),
+ ILI9881C_COMMAND_INSTR(0xB0, 0x25),
+ ILI9881C_COMMAND_INSTR(0xB1, 0x62),
+ ILI9881C_COMMAND_INSTR(0xB2, 0x6A),
+ ILI9881C_COMMAND_INSTR(0xB3, 0x31),
+
+ ILI9881C_COMMAND_INSTR(0xC0, 0x00),
+ ILI9881C_COMMAND_INSTR(0xC1, 0x53),
+ ILI9881C_COMMAND_INSTR(0xC2, 0x50),
+ ILI9881C_COMMAND_INSTR(0xC3, 0x20),
+ ILI9881C_COMMAND_INSTR(0xC4, 0x27),
+ ILI9881C_COMMAND_INSTR(0xC5, 0x33),
+ ILI9881C_COMMAND_INSTR(0xC6, 0x25),
+ ILI9881C_COMMAND_INSTR(0xC7, 0x25),
+ ILI9881C_COMMAND_INSTR(0xC8, 0xD4),
+ ILI9881C_COMMAND_INSTR(0xC9, 0x1A),
+ ILI9881C_COMMAND_INSTR(0xCA, 0x2B),
+ ILI9881C_COMMAND_INSTR(0xCB, 0xB5),
+ ILI9881C_COMMAND_INSTR(0xCC, 0x19),
+ ILI9881C_COMMAND_INSTR(0xCD, 0x18),
+ ILI9881C_COMMAND_INSTR(0xCE, 0x53),
+ ILI9881C_COMMAND_INSTR(0xCF, 0x1A),
+ ILI9881C_COMMAND_INSTR(0xD0, 0x25),
+ ILI9881C_COMMAND_INSTR(0xD1, 0x62),
+ ILI9881C_COMMAND_INSTR(0xD2, 0x6A),
+ ILI9881C_COMMAND_INSTR(0xD3, 0x31),
+ ILI9881C_SWITCH_PAGE_INSTR(0),
+ ILI9881C_COMMAND_INSTR(MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c),
+ ILI9881C_COMMAND_INSTR(MIPI_DCS_WRITE_POWER_SAVE, 0x00),
+};
+
static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel)
{
return container_of(panel, struct ili9881c, panel);
@@ -1014,6 +1211,23 @@ static const struct drm_display_mode w552946aba_default_mode = {
.height_mm = 121,
};
+static const struct drm_display_mode am8001280g_default_mode = {
+ .clock = 67911,
+
+ .hdisplay = 800,
+ .hsync_start = 800 + 20,
+ .hsync_end = 800 + 20 + 32,
+ .htotal = 800 + 20 + 32 + 20,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 6,
+ .vsync_end = 1280 + 6 + 8,
+ .vtotal = 1280 + 6 + 8 + 4,
+
+ .width_mm = 94,
+ .height_mm = 151,
+};
+
static int ili9881c_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
@@ -1094,6 +1308,8 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
return ret;
}
+ ctx->panel.prepare_prev_first = true;
+
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
@@ -1145,11 +1361,20 @@ static const struct ili9881c_desc w552946aba_desc = {
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET,
};
+static const struct ili9881c_desc am8001280g_desc = {
+ .init = am8001280g_init,
+ .init_length = ARRAY_SIZE(am8001280g_init),
+ .mode = &am8001280g_default_mode,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM,
+};
+
static const struct of_device_id ili9881c_of_match[] = {
{ .compatible = "bananapi,lhr050h41", .data = &lhr050h41_desc },
{ .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc },
{ .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc },
{ .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc },
+ { .compatible = "ampire,am8001280g", .data = &am8001280g_desc },
{ }
};
MODULE_DEVICE_TABLE(of, ili9881c_of_match);
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
index 9b9a7eb1bc60..a189ce236328 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
@@ -1254,9 +1254,9 @@ static int nt36523_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, -EPROBE_DEFER, "cannot get secondary DSI host\n");
pinfo->dsi[1] = mipi_dsi_device_register_full(dsi1_host, info);
- if (!pinfo->dsi[1]) {
+ if (IS_ERR(pinfo->dsi[1])) {
dev_err(dev, "cannot get secondary DSI device\n");
- return -ENODEV;
+ return PTR_ERR(pinfo->dsi[1]);
}
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 9367a4572dcf..8017ad33cf18 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -1324,6 +1324,35 @@ static const struct panel_desc bananapi_s070wv20_ct16 = {
},
};
+static const struct drm_display_mode boe_bp101wx1_100_mode = {
+ .clock = 78945,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 0,
+ .hsync_end = 1280 + 0 + 2,
+ .htotal = 1280 + 0 + 2 + 62,
+ .vdisplay = 800,
+ .vsync_start = 800 + 8,
+ .vsync_end = 800 + 8 + 2,
+ .vtotal = 800 + 8 + 2 + 6,
+};
+
+static const struct panel_desc boe_bp101wx1_100 = {
+ .modes = &boe_bp101wx1_100_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .delay = {
+ .enable = 50,
+ .disable = 50,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing boe_ev121wxm_n10_1850_timing = {
.pixelclock = { 69922000, 71000000, 72293000 },
.hactive = { 1280, 1280, 1280 },
@@ -1973,6 +2002,33 @@ static const struct panel_desc eink_vb3300_kca = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
+static const struct display_timing evervision_vgg644804_timing = {
+ .pixelclock = { 25175000, 25175000, 25175000 },
+ .hactive = { 640, 640, 640 },
+ .hfront_porch = { 16, 16, 16 },
+ .hback_porch = { 82, 114, 170 },
+ .hsync_len = { 5, 30, 30 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 10, 10, 10 },
+ .vback_porch = { 30, 32, 34 },
+ .vsync_len = { 1, 3, 5 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_POSEDGE,
+};
+
+static const struct panel_desc evervision_vgg644804 = {
+ .timings = &evervision_vgg644804_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 115,
+ .height = 86,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
+};
+
static const struct display_timing evervision_vgg804821_timing = {
.pixelclock = { 27600000, 33300000, 50000000 },
.hactive = { 800, 800, 800 },
@@ -4254,6 +4310,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "bananapi,s070wv20-ct16",
.data = &bananapi_s070wv20_ct16,
}, {
+ .compatible = "boe,bp101wx1-100",
+ .data = &boe_bp101wx1_100,
+ }, {
.compatible = "boe,ev121wxm-n10-1850",
.data = &boe_ev121wxm_n10_1850,
}, {
@@ -4335,6 +4394,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "eink,vb3300-kca",
.data = &eink_vb3300_kca,
}, {
+ .compatible = "evervision,vgg644804",
+ .data = &evervision_vgg644804,
+ }, {
.compatible = "evervision,vgg804821",
.data = &evervision_vgg804821,
}, {
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index f59c82ea8870..2d30da38c2c3 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -29,14 +29,20 @@ static void panfrost_devfreq_update_utilization(struct panfrost_devfreq *pfdevfr
static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
+ struct panfrost_device *ptdev = dev_get_drvdata(dev);
struct dev_pm_opp *opp;
+ int err;
opp = devfreq_recommended_opp(dev, freq, flags);
if (IS_ERR(opp))
return PTR_ERR(opp);
dev_pm_opp_put(opp);
- return dev_pm_opp_set_rate(dev, *freq);
+ err = dev_pm_opp_set_rate(dev, *freq);
+ if (!err)
+ ptdev->pfdevfreq.current_frequency = *freq;
+
+ return err;
}
static void panfrost_devfreq_reset(struct panfrost_devfreq *pfdevfreq)
@@ -58,7 +64,6 @@ static int panfrost_devfreq_get_dev_status(struct device *dev,
spin_lock_irqsave(&pfdevfreq->lock, irqflags);
panfrost_devfreq_update_utilization(pfdevfreq);
- pfdevfreq->current_frequency = status->current_frequency;
status->total_time = ktime_to_ns(ktime_add(pfdevfreq->busy_time,
pfdevfreq->idle_time));
@@ -165,6 +170,14 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
panfrost_devfreq_profile.initial_freq = cur_freq;
/*
+ * We could wait until panfrost_devfreq_target() to set this value, but
+ * since the simple_ondemand governor works asynchronously, there's a
+ * chance that, by the time someone opens the device's fdinfo file, the
+ * current frequency hasn't been updated yet, so do an early set here.
+ */
+ pfdevfreq->current_frequency = cur_freq;
+
+ /*
* Set the recommended OPP; this will enable and configure the regulator,
* if any, and will avoid a switch-off by regulator_late_cleanup()
*/
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index c90ad5ee34e7..a45e4addcc19 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -421,6 +421,9 @@ static int panfrost_device_runtime_suspend(struct device *dev)
return -EBUSY;
panfrost_devfreq_suspend(pfdev);
+ panfrost_job_suspend_irq(pfdev);
+ panfrost_mmu_suspend_irq(pfdev);
+ panfrost_gpu_suspend_irq(pfdev);
panfrost_gpu_power_off(pfdev);
return 0;
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 0fc558db6bfd..62f7e3527385 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -25,6 +25,13 @@ struct panfrost_perfcnt;
#define NUM_JOB_SLOTS 3
#define MAX_PM_DOMAINS 5
+enum panfrost_drv_comp_bits {
+ PANFROST_COMP_BIT_GPU,
+ PANFROST_COMP_BIT_JOB,
+ PANFROST_COMP_BIT_MMU,
+ PANFROST_COMP_BIT_MAX
+};
+
/**
* enum panfrost_gpu_pm - Supported kernel power management features
* @GPU_PM_CLK_DIS: Allow disabling clocks during system suspend
@@ -94,6 +101,8 @@ struct panfrost_device {
struct device *dev;
struct drm_device *ddev;
struct platform_device *pdev;
+ int gpu_irq;
+ int mmu_irq;
void __iomem *iomem;
struct clk *clock;
@@ -107,6 +116,7 @@ struct panfrost_device {
struct panfrost_features features;
const struct panfrost_compatible *comp;
+ DECLARE_BITMAP(is_suspended, PANFROST_COMP_BIT_MAX);
spinlock_t as_lock;
unsigned long as_in_use_mask;
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 0cf64456e29a..d47b40b82b0b 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -200,7 +200,7 @@ static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
enum drm_gem_object_status res = 0;
- if (bo->base.pages)
+ if (bo->base.base.import_attach || bo->base.pages)
res |= DRM_GEM_OBJECT_RESIDENT;
if (bo->base.madv == PANFROST_MADV_DONTNEED)
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index 09f5e1563ebd..9063ce254642 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -22,9 +22,13 @@
static irqreturn_t panfrost_gpu_irq_handler(int irq, void *data)
{
struct panfrost_device *pfdev = data;
- u32 state = gpu_read(pfdev, GPU_INT_STAT);
- u32 fault_status = gpu_read(pfdev, GPU_FAULT_STATUS);
+ u32 fault_status, state;
+ if (test_bit(PANFROST_COMP_BIT_GPU, pfdev->is_suspended))
+ return IRQ_NONE;
+
+ fault_status = gpu_read(pfdev, GPU_FAULT_STATUS);
+ state = gpu_read(pfdev, GPU_INT_STAT);
if (!state)
return IRQ_NONE;
@@ -61,6 +65,8 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
gpu_write(pfdev, GPU_INT_MASK, 0);
gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED);
+ clear_bit(PANFROST_COMP_BIT_GPU, pfdev->is_suspended);
+
gpu_write(pfdev, GPU_CMD, GPU_CMD_SOFT_RESET);
ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT,
val, val & GPU_IRQ_RESET_COMPLETED, 10, 10000);
@@ -78,7 +84,12 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
}
gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_MASK_ALL);
- gpu_write(pfdev, GPU_INT_MASK, GPU_IRQ_MASK_ALL);
+
+ /* Only enable the interrupts we care about */
+ gpu_write(pfdev, GPU_INT_MASK,
+ GPU_IRQ_MASK_ERROR |
+ GPU_IRQ_PERFCNT_SAMPLE_COMPLETED |
+ GPU_IRQ_CLEAN_CACHES_COMPLETED);
/*
* All in-flight jobs should have released their cycle
@@ -425,11 +436,10 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
void panfrost_gpu_power_off(struct panfrost_device *pfdev)
{
- u64 core_mask = panfrost_get_core_mask(pfdev);
int ret;
u32 val;
- gpu_write(pfdev, SHADER_PWROFF_LO, pfdev->features.shader_present & core_mask);
+ gpu_write(pfdev, SHADER_PWROFF_LO, pfdev->features.shader_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_PWRTRANS_LO,
val, !val, 1, 1000);
if (ret)
@@ -441,16 +451,24 @@ void panfrost_gpu_power_off(struct panfrost_device *pfdev)
if (ret)
dev_err(pfdev->dev, "tiler power transition timeout");
- gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present & core_mask);
+ gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present);
ret = readl_poll_timeout(pfdev->iomem + L2_PWRTRANS_LO,
val, !val, 0, 1000);
if (ret)
dev_err(pfdev->dev, "l2 power transition timeout");
}
+void panfrost_gpu_suspend_irq(struct panfrost_device *pfdev)
+{
+ set_bit(PANFROST_COMP_BIT_GPU, pfdev->is_suspended);
+
+ gpu_write(pfdev, GPU_INT_MASK, 0);
+ synchronize_irq(pfdev->gpu_irq);
+}
+
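The suspend_irq helpers added across gpu, job, and mmu all follow the same quiesce idiom: publish the suspended bit first so a racing handler bails out via the is_suspended test, mask the interrupt source, then synchronize_irq() to wait out any handler still running. A generic fragment with hypothetical register and field names:

	set_bit(MY_COMP_BIT, pfdev->is_suspended);	/* handlers now bail out  */
	my_write(pfdev, MY_INT_MASK, 0);		/* mask the irq source    */
	synchronize_irq(pfdev->my_irq);			/* drain in-flight handler */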
int panfrost_gpu_init(struct panfrost_device *pfdev)
{
- int err, irq;
+ int err;
err = panfrost_gpu_soft_reset(pfdev);
if (err)
@@ -465,11 +483,11 @@ int panfrost_gpu_init(struct panfrost_device *pfdev)
dma_set_max_seg_size(pfdev->dev, UINT_MAX);
- irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu");
- if (irq < 0)
- return irq;
+ pfdev->gpu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu");
+ if (pfdev->gpu_irq < 0)
+ return pfdev->gpu_irq;
- err = devm_request_irq(pfdev->dev, irq, panfrost_gpu_irq_handler,
+ err = devm_request_irq(pfdev->dev, pfdev->gpu_irq, panfrost_gpu_irq_handler,
IRQF_SHARED, KBUILD_MODNAME "-gpu", pfdev);
if (err) {
dev_err(pfdev->dev, "failed to request gpu irq");
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h b/drivers/gpu/drm/panfrost/panfrost_gpu.h
index 876fdad9f721..d841b86504ea 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h
@@ -15,6 +15,7 @@ u32 panfrost_gpu_get_latest_flush_id(struct panfrost_device *pfdev);
int panfrost_gpu_soft_reset(struct panfrost_device *pfdev);
void panfrost_gpu_power_on(struct panfrost_device *pfdev);
void panfrost_gpu_power_off(struct panfrost_device *pfdev);
+void panfrost_gpu_suspend_irq(struct panfrost_device *pfdev);
void panfrost_cycle_counter_get(struct panfrost_device *pfdev);
void panfrost_cycle_counter_put(struct panfrost_device *pfdev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index f9446e197428..0c2dbf6ef2a5 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -405,6 +405,8 @@ void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
int j;
u32 irq_mask = 0;
+ clear_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended);
+
for (j = 0; j < NUM_JOB_SLOTS; j++) {
irq_mask |= MK_JS_MASK(j);
}
@@ -413,6 +415,14 @@ void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
job_write(pfdev, JOB_INT_MASK, irq_mask);
}
+void panfrost_job_suspend_irq(struct panfrost_device *pfdev)
+{
+ set_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended);
+
+ job_write(pfdev, JOB_INT_MASK, 0);
+ synchronize_irq(pfdev->js->irq);
+}
+
static void panfrost_job_handle_err(struct panfrost_device *pfdev,
struct panfrost_job *job,
unsigned int js)
@@ -792,17 +802,25 @@ static irqreturn_t panfrost_job_irq_handler_thread(int irq, void *data)
struct panfrost_device *pfdev = data;
panfrost_job_handle_irqs(pfdev);
- job_write(pfdev, JOB_INT_MASK,
- GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
- GENMASK(NUM_JOB_SLOTS - 1, 0));
+
+ /* Enable interrupts only if we're not about to get suspended */
+ if (!test_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended))
+ job_write(pfdev, JOB_INT_MASK,
+ GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
+ GENMASK(NUM_JOB_SLOTS - 1, 0));
+
return IRQ_HANDLED;
}
static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
{
struct panfrost_device *pfdev = data;
- u32 status = job_read(pfdev, JOB_INT_STAT);
+ u32 status;
+
+ if (test_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended))
+ return IRQ_NONE;
+ status = job_read(pfdev, JOB_INT_STAT);
if (!status)
return IRQ_NONE;
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
index 17ff808dba07..ec581b97852b 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.h
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -47,6 +47,7 @@ int panfrost_job_get_slot(struct panfrost_job *job);
int panfrost_job_push(struct panfrost_job *job);
void panfrost_job_put(struct panfrost_job *job);
void panfrost_job_enable_interrupts(struct panfrost_device *pfdev);
+void panfrost_job_suspend_irq(struct panfrost_device *pfdev);
int panfrost_job_is_idle(struct panfrost_device *pfdev);
#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 846dd697c410..f38385fe76bb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -231,6 +231,8 @@ void panfrost_mmu_reset(struct panfrost_device *pfdev)
{
struct panfrost_mmu *mmu, *mmu_tmp;
+ clear_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended);
+
spin_lock(&pfdev->as_lock);
pfdev->as_alloc_mask = 0;
@@ -670,6 +672,9 @@ static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
struct panfrost_device *pfdev = data;
+ if (test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended))
+ return IRQ_NONE;
+
if (!mmu_read(pfdev, MMU_INT_STAT))
return IRQ_NONE;
@@ -744,22 +749,25 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
status = mmu_read(pfdev, MMU_INT_RAWSTAT) & ~pfdev->as_faulty_mask;
}
- spin_lock(&pfdev->as_lock);
- mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
- spin_unlock(&pfdev->as_lock);
+ /* Enable interrupts only if we're not about to get suspended */
+ if (!test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended)) {
+ spin_lock(&pfdev->as_lock);
+ mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
+ spin_unlock(&pfdev->as_lock);
+ }
return IRQ_HANDLED;
};
int panfrost_mmu_init(struct panfrost_device *pfdev)
{
- int err, irq;
+ int err;
- irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
- if (irq < 0)
- return irq;
+ pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
+ if (pfdev->mmu_irq < 0)
+ return pfdev->mmu_irq;
- err = devm_request_threaded_irq(pfdev->dev, irq,
+ err = devm_request_threaded_irq(pfdev->dev, pfdev->mmu_irq,
panfrost_mmu_irq_handler,
panfrost_mmu_irq_handler_thread,
IRQF_SHARED, KBUILD_MODNAME "-mmu",
@@ -777,3 +785,11 @@ void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
mmu_write(pfdev, MMU_INT_MASK, 0);
}
+
+void panfrost_mmu_suspend_irq(struct panfrost_device *pfdev)
+{
+ set_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended);
+
+ mmu_write(pfdev, MMU_INT_MASK, 0);
+ synchronize_irq(pfdev->mmu_irq);
+}
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h
index cc2a0d307feb..022a9a74a114 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.h
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
@@ -14,6 +14,7 @@ void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping);
int panfrost_mmu_init(struct panfrost_device *pfdev);
void panfrost_mmu_fini(struct panfrost_device *pfdev);
void panfrost_mmu_reset(struct panfrost_device *pfdev);
+void panfrost_mmu_suspend_irq(struct panfrost_device *pfdev);
u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
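Taken together, the Panfrost hunks above apply one quiesce-before-suspend pattern per component (GPU, job, MMU): set the component's is_suspended bit, mask its interrupt at the device, then synchronize_irq() so any handler already in flight drains before the clocks go down; the matching reset path clears the bit again. A minimal sketch of that pattern, assuming illustrative names (my_dev, MY_*) rather than Panfrost's:

    #include <linux/bitops.h>
    #include <linux/interrupt.h>
    #include <linux/io.h>

    #define MY_COMP_BIT  0       /* bit index in is_suspended */
    #define MY_INT_MASK  0x28    /* hypothetical register offsets */
    #define MY_INT_STAT  0x2c

    struct my_dev {
            void __iomem *iomem;
            int irq;
            DECLARE_BITMAP(is_suspended, 1);
    };

    static irqreturn_t my_irq_handler(int irq, void *data)
    {
            struct my_dev *dev = data;

            /* A shared line can still fire: never touch suspended hardware. */
            if (test_bit(MY_COMP_BIT, dev->is_suspended))
                    return IRQ_NONE;

            if (!readl(dev->iomem + MY_INT_STAT))
                    return IRQ_NONE;

            /* ... acknowledge and handle the event ... */
            return IRQ_HANDLED;
    }

    static void my_suspend_irq(struct my_dev *dev)
    {
            set_bit(MY_COMP_BIT, dev->is_suspended); /* handler bails from now on */
            writel(0, dev->iomem + MY_INT_MASK);     /* mask at the device */
            synchronize_irq(dev->irq);               /* drain in-flight handlers */
    }

The bit test is what keeps IRQF_SHARED safe here: once my_suspend_irq() returns, no handler is mid-flight, and any later spurious invocation returns IRQ_NONE without touching a register.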
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 404b0483bb7c..c6d35c33d5d6 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -485,7 +485,6 @@ static int qxl_primary_atomic_check(struct drm_plane *plane,
static int qxl_primary_apply_cursor(struct qxl_device *qdev,
struct drm_plane_state *plane_state)
{
- struct drm_framebuffer *fb = plane_state->fb;
struct qxl_crtc *qcrtc = to_qxl_crtc(plane_state->crtc);
struct qxl_cursor_cmd *cmd;
struct qxl_release *release;
@@ -510,8 +509,8 @@ static int qxl_primary_apply_cursor(struct qxl_device *qdev,
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_SET;
- cmd->u.set.position.x = plane_state->crtc_x + fb->hot_x;
- cmd->u.set.position.y = plane_state->crtc_y + fb->hot_y;
+ cmd->u.set.position.x = plane_state->crtc_x + plane_state->hotspot_x;
+ cmd->u.set.position.y = plane_state->crtc_y + plane_state->hotspot_y;
cmd->u.set.shape = qxl_bo_physical_address(qdev, qcrtc->cursor_bo, 0);
@@ -531,7 +530,6 @@ out_free_release:
static int qxl_primary_move_cursor(struct qxl_device *qdev,
struct drm_plane_state *plane_state)
{
- struct drm_framebuffer *fb = plane_state->fb;
struct qxl_crtc *qcrtc = to_qxl_crtc(plane_state->crtc);
struct qxl_cursor_cmd *cmd;
struct qxl_release *release;
@@ -554,8 +552,8 @@ static int qxl_primary_move_cursor(struct qxl_device *qdev,
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_MOVE;
- cmd->u.position.x = plane_state->crtc_x + fb->hot_x;
- cmd->u.position.y = plane_state->crtc_y + fb->hot_y;
+ cmd->u.position.x = plane_state->crtc_x + plane_state->hotspot_x;
+ cmd->u.position.y = plane_state->crtc_y + plane_state->hotspot_y;
qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_release_fence_buffer_objects(release);
@@ -851,8 +849,8 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
struct qxl_bo *old_cursor_bo = qcrtc->cursor_bo;
qcrtc->cursor_bo = qxl_create_cursor(qdev, user_bo,
- new_state->fb->hot_x,
- new_state->fb->hot_y);
+ new_state->hotspot_x,
+ new_state->hotspot_y);
qxl_free_cursor(old_cursor_bo);
}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 46de4f171970..beee5563031a 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -285,7 +285,7 @@ static const struct drm_ioctl_desc qxl_ioctls[] = {
};
static struct drm_driver qxl_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_CURSOR_HOTSPOT,
.dumb_create = qxl_mode_dumb_create,
.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 307a890fde13..32069acd93f8 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -119,7 +119,6 @@ struct qxl_output {
#define to_qxl_crtc(x) container_of(x, struct qxl_crtc, base)
#define drm_connector_to_qxl_output(x) container_of(x, struct qxl_output, base)
-#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc)
struct qxl_mman {
struct ttm_device bdev;
@@ -256,8 +255,6 @@ struct qxl_device {
#define to_qxl(dev) container_of(dev, struct qxl_device, ddev)
-int qxl_debugfs_fence_init(struct qxl_device *rdev);
-
int qxl_device_init(struct qxl_device *qdev, struct pci_dev *pdev);
void qxl_device_fini(struct qxl_device *qdev);
@@ -344,8 +341,6 @@ qxl_image_alloc_objects(struct qxl_device *qdev,
int height, int stride);
void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage);
-void qxl_update_screen(struct qxl_device *qxl);
-
/* qxl io operations (qxl_cmd.c) */
void qxl_io_create_primary(struct qxl_device *qdev,
@@ -445,8 +440,6 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo);
-struct qxl_drv_surface *
-qxl_surface_lookup(struct drm_device *dev, int surface_id);
void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing);
/* qxl_ioctl.c */
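The qxl hunks above belong to the cursor-hotspot rework: with DRIVER_CURSOR_HOTSPOT advertised, the core keeps the hotspot in drm_plane_state (the hotspot_x/hotspot_y fields added by this series), so drivers stop reading it from the framebuffer. A hedged sketch of the consuming side; my_push_cursor() is a placeholder for the device-specific cursor command, not a real helper:

    #include <drm/drm_atomic.h>
    #include <drm/drm_device.h>
    #include <drm/drm_plane.h>

    static void my_push_cursor(struct drm_device *dev, int x, int y);

    static void my_cursor_plane_update(struct drm_plane *plane,
                                       struct drm_atomic_state *state)
    {
            struct drm_plane_state *new_state =
                    drm_atomic_get_new_plane_state(state, plane);

            /* The hotspot now lives in the plane state, not the framebuffer. */
            my_push_cursor(plane->dev,
                           new_state->crtc_x + new_state->hotspot_x,
                           new_state->crtc_y + new_state->hotspot_y);
    }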
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 34a1c73d3938..02a65971d140 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -33,7 +33,6 @@
#include <linux/firmware.h>
#include <linux/platform_device.h>
-#include <drm/drm_legacy.h>
#include "radeon_family.h"
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c
index 8f9a728affde..07ad17d24294 100644
--- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c
+++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c
@@ -14,7 +14,6 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
-#include <drm/drm_plane_helper.h>
#include "shmob_drm_drv.h"
#include "shmob_drm_kms.h"
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 4d42b1e4daa6..3c4f5a392b06 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -81,12 +81,16 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
*/
pr_warn("%s: called with uninitialized scheduler\n", __func__);
} else if (num_sched_list) {
- /* The "priority" of an entity cannot exceed the number
- * of run-queues of a scheduler.
+ /* The "priority" of an entity cannot exceed the number of run-queues of a
+ * scheduler. Protect against num_rqs being 0 by converting to signed. Choose
+ * the lowest priority available.
*/
- if (entity->priority >= sched_list[0]->num_rqs)
- entity->priority = max_t(u32, sched_list[0]->num_rqs,
- DRM_SCHED_PRIORITY_MIN);
+ if (entity->priority >= sched_list[0]->num_rqs) {
+ drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n",
+ entity->priority, sched_list[0]->num_rqs);
+ entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
+ (s32) DRM_SCHED_PRIORITY_KERNEL);
+ }
entity->rq = sched_list[0]->sched_rq[entity->priority];
}
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 044a8c4875ba..550492a7a031 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -1029,9 +1029,8 @@ EXPORT_SYMBOL(drm_sched_job_cleanup);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity)
{
- if (drm_sched_entity_is_ready(entity))
- if (drm_sched_can_queue(sched, entity))
- drm_sched_run_job_queue(sched);
+ if (drm_sched_can_queue(sched, entity))
+ drm_sched_run_job_queue(sched);
}
/**
@@ -1051,8 +1050,9 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
struct drm_sched_entity *entity;
int i;
- /* Kernel run queue has higher priority than normal run queue*/
- for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ /* Start with the highest priority. */
+ for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) :
drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]);
@@ -1291,7 +1291,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
if (!sched->sched_rq)
goto Out_free;
sched->num_rqs = num_rqs;
- for (i = DRM_SCHED_PRIORITY_MIN; i < sched->num_rqs; i++) {
+ for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
if (!sched->sched_rq[i])
goto Out_unroll;
@@ -1312,7 +1312,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
sched->ready = true;
return 0;
Out_unroll:
- for (--i ; i >= DRM_SCHED_PRIORITY_MIN; i--)
+ for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--)
kfree(sched->sched_rq[i]);
Out_free:
kfree(sched->sched_rq);
@@ -1338,7 +1338,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
drm_sched_wqueue_stop(sched);
- for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
struct drm_sched_rq *rq = sched->sched_rq[i];
spin_lock(&rq->lock);
@@ -1390,9 +1390,7 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
atomic_inc(&bad->karma);
- for (i = DRM_SCHED_PRIORITY_MIN;
- i < min_t(typeof(sched->num_rqs), sched->num_rqs, DRM_SCHED_PRIORITY_KERNEL);
- i++) {
+ for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++) {
struct drm_sched_rq *rq = sched->sched_rq[i];
spin_lock(&rq->lock);
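All of the sched_main.c loop changes above rely on the reversed priority numbering introduced earlier in this series: DRM_SCHED_PRIORITY_KERNEL becomes index 0 (most urgent) and values grow toward the least urgent, so "highest priority first" is now a forward iteration, and drm_sched_increase_karma() skips only the kernel queue by starting at DRM_SCHED_PRIORITY_HIGH. For reference, the reordered enum in include/drm/gpu_scheduler.h is expected to read roughly:

    enum drm_sched_priority {
            DRM_SCHED_PRIORITY_KERNEL,      /* index 0: most urgent */
            DRM_SCHED_PRIORITY_HIGH,
            DRM_SCHED_PRIORITY_NORMAL,
            DRM_SCHED_PRIORITY_LOW,         /* least urgent */

            DRM_SCHED_PRIORITY_COUNT
    };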
diff --git a/drivers/gpu/drm/solomon/ssd130x.h b/drivers/gpu/drm/solomon/ssd130x.h
index acf7cedf0c1a..075c5c3ee75a 100644
--- a/drivers/gpu/drm/solomon/ssd130x.h
+++ b/drivers/gpu/drm/solomon/ssd130x.h
@@ -17,7 +17,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_plane_helper.h>
#include <linux/regmap.h>
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
index 2645af241ff0..d6183b3d7688 100644
--- a/drivers/gpu/drm/tests/Makefile
+++ b/drivers/gpu/drm/tests/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \
drm_format_helper_test.o \
drm_format_test.o \
drm_framebuffer_test.o \
+ drm_gem_shmem_test.o \
drm_managed_test.o \
drm_mm_test.o \
drm_modes_test.o \
diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
new file mode 100644
index 000000000000..91202e40cde9
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test suite for GEM objects backed by shmem buffers
+ *
+ * Copyright (C) 2023 Red Hat, Inc.
+ *
+ * Author: Marco Pagani <[email protected]>
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/iosys-map.h>
+#include <linux/sizes.h>
+
+#include <kunit/test.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_kunit_helpers.h>
+
+#define TEST_SIZE SZ_1M
+#define TEST_BYTE 0xae
+
+/*
+ * Wrappers to avoid explicit type casts when passing action
+ * functions to kunit_add_action().
+ */
+static void kfree_wrapper(void *ptr)
+{
+ const void *obj = ptr;
+
+ kfree(obj);
+}
+
+static void sg_free_table_wrapper(void *ptr)
+{
+ struct sg_table *sgt = ptr;
+
+ sg_free_table(sgt);
+}
+
+static void drm_gem_shmem_free_wrapper(void *ptr)
+{
+ struct drm_gem_shmem_object *shmem = ptr;
+
+ drm_gem_shmem_free(shmem);
+}
+
+/*
+ * Test creating a shmem GEM object backed by a shmem buffer. The test
+ * case succeeds if the GEM object is successfully allocated with the
+ * shmem file node and object-functions attributes set, and the size
+ * attribute equal to the requested size.
+ */
+static void drm_gem_shmem_test_obj_create(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+ KUNIT_EXPECT_EQ(test, shmem->base.size, TEST_SIZE);
+ KUNIT_EXPECT_NOT_NULL(test, shmem->base.filp);
+ KUNIT_EXPECT_NOT_NULL(test, shmem->base.funcs);
+
+ drm_gem_shmem_free(shmem);
+}
+
+/*
+ * Test creating a shmem GEM object from a scatter/gather table exported
+ * via a DMA-BUF. The test case succeeds if the GEM object is successfully
+ * created with the shmem file node attribute equal to NULL and the sgt
+ * attribute pointing to the scatter/gather table that has been imported.
+ */
+static void drm_gem_shmem_test_obj_create_private(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *gem_obj;
+ struct dma_buf buf_mock;
+ struct dma_buf_attachment attach_mock;
+ struct sg_table *sgt;
+ char *buf;
+ int ret;
+
+ /* Create a mock scatter/gather table */
+ buf = kunit_kzalloc(test, TEST_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, buf);
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, sgt);
+
+ ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ sg_init_one(sgt->sgl, buf, TEST_SIZE);
+
+ /* Init a mock DMA-BUF */
+ buf_mock.size = TEST_SIZE;
+ attach_mock.dmabuf = &buf_mock;
+
+ gem_obj = drm_gem_shmem_prime_import_sg_table(drm_dev, &attach_mock, sgt);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gem_obj);
+ KUNIT_EXPECT_EQ(test, gem_obj->size, TEST_SIZE);
+ KUNIT_EXPECT_NULL(test, gem_obj->filp);
+ KUNIT_EXPECT_NOT_NULL(test, gem_obj->funcs);
+
+ /* The scatter/gather table will be freed by drm_gem_shmem_free */
+ kunit_remove_action(test, sg_free_table_wrapper, sgt);
+ kunit_remove_action(test, kfree_wrapper, sgt);
+
+ shmem = to_drm_gem_shmem_obj(gem_obj);
+ KUNIT_EXPECT_PTR_EQ(test, shmem->sgt, sgt);
+
+ drm_gem_shmem_free(shmem);
+}
+
+/*
+ * Test pinning backing pages for a shmem GEM object. The test case
+ * succeeds if a suitable number of backing pages are allocated, and
+ * the pages table counter attribute is increased by one.
+ */
+static void drm_gem_shmem_test_pin_pages(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ int i, ret;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+ KUNIT_EXPECT_NULL(test, shmem->pages);
+ KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);
+
+ ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_gem_shmem_pin(shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
+ KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
+
+ for (i = 0; i < (shmem->base.size >> PAGE_SHIFT); i++)
+ KUNIT_ASSERT_NOT_NULL(test, shmem->pages[i]);
+
+ drm_gem_shmem_unpin(shmem);
+ KUNIT_EXPECT_NULL(test, shmem->pages);
+ KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);
+}
+
+/*
+ * Test creating a virtual mapping for a shmem GEM object. The test
+ * case succeeds if the backing memory is mapped and the reference
+ * counter for virtual mapping is increased by one. Moreover, the test
+ * case writes and then reads a test pattern over the mapped memory.
+ */
+static void drm_gem_shmem_test_vmap(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ struct iosys_map map;
+ int ret, i;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+ KUNIT_EXPECT_NULL(test, shmem->vaddr);
+ KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+
+ ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_gem_shmem_vmap(shmem, &map);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_NOT_NULL(test, shmem->vaddr);
+ KUNIT_ASSERT_FALSE(test, iosys_map_is_null(&map));
+ KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 1);
+
+ iosys_map_memset(&map, 0, TEST_BYTE, TEST_SIZE);
+ for (i = 0; i < TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, iosys_map_rd(&map, i, u8), TEST_BYTE);
+
+ drm_gem_shmem_vunmap(shmem, &map);
+ KUNIT_EXPECT_NULL(test, shmem->vaddr);
+ KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+}
+
+/*
+ * Test exporting a scatter/gather table of pinned pages suitable for
+ * PRIME usage from a shmem GEM object. The test case succeeds if a
+ * scatter/gather table large enough to accommodate the backing memory
+ * is successfully exported.
+ */
+static void drm_gem_shmem_test_get_pages_sgt(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ unsigned int si, len = 0;
+ int ret;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+
+ ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_gem_shmem_pin(shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ sgt = drm_gem_shmem_get_sg_table(shmem);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
+ KUNIT_EXPECT_NULL(test, shmem->sgt);
+
+ ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ for_each_sgtable_sg(sgt, sg, si) {
+ KUNIT_EXPECT_NOT_NULL(test, sg);
+ len += sg->length;
+ }
+
+ KUNIT_EXPECT_GE(test, len, TEST_SIZE);
+}
+
+/*
+ * Test pinning pages and exporting a scatter/gather table suitable for
+ * driver usage from a shmem GEM object. The test case succeeds if the
+ * backing pages are pinned and a scatter/gather table large enough to
+ * accommodate the backing memory is successfully exported.
+ */
+static void drm_gem_shmem_test_get_sg_table(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ unsigned int si, len = 0;
+ int ret;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+
+ ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ /* The scatter/gather table will be freed by drm_gem_shmem_free */
+ sgt = drm_gem_shmem_get_pages_sgt(shmem);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
+ KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
+ KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
+ KUNIT_EXPECT_PTR_EQ(test, sgt, shmem->sgt);
+
+ for_each_sgtable_sg(sgt, sg, si) {
+ KUNIT_EXPECT_NOT_NULL(test, sg);
+ len += sg->length;
+ }
+
+ KUNIT_EXPECT_GE(test, len, TEST_SIZE);
+}
+
+/*
+ * Test updating the madvise state of a shmem GEM object. The test
+ * case checks that the function for setting madv updates it only if
+ * its current value is greater than or equal to zero and returns false
+ * if it has a negative value.
+ */
+static void drm_gem_shmem_test_madvise(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ int ret;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+ KUNIT_ASSERT_EQ(test, shmem->madv, 0);
+
+ ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_gem_shmem_madvise(shmem, 1);
+ KUNIT_EXPECT_TRUE(test, ret);
+ KUNIT_ASSERT_EQ(test, shmem->madv, 1);
+
+ /* Set madv to a negative value */
+ ret = drm_gem_shmem_madvise(shmem, -1);
+ KUNIT_EXPECT_FALSE(test, ret);
+ KUNIT_ASSERT_EQ(test, shmem->madv, -1);
+
+ /* Check that madv cannot be set back to a positive value */
+ ret = drm_gem_shmem_madvise(shmem, 0);
+ KUNIT_EXPECT_FALSE(test, ret);
+ KUNIT_ASSERT_EQ(test, shmem->madv, -1);
+}
+
+/*
+ * Test purging a shmem GEM object. First, assert that a newly created
+ * shmem GEM object is not purgeable. Then, set madvise to a positive
+ * value and call drm_gem_shmem_get_pages_sgt() to pin and dma-map the
+ * backing pages. Finally, assert that the shmem GEM object is now
+ * purgeable and purge it.
+ */
+static void drm_gem_shmem_test_purge(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ struct sg_table *sgt;
+ int ret;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+
+ ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_gem_shmem_is_purgeable(shmem);
+ KUNIT_EXPECT_FALSE(test, ret);
+
+ ret = drm_gem_shmem_madvise(shmem, 1);
+ KUNIT_EXPECT_TRUE(test, ret);
+
+ /* The scatter/gather table will be freed by drm_gem_shmem_free */
+ sgt = drm_gem_shmem_get_pages_sgt(shmem);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
+
+ ret = drm_gem_shmem_is_purgeable(shmem);
+ KUNIT_EXPECT_TRUE(test, ret);
+
+ drm_gem_shmem_purge(shmem);
+ KUNIT_EXPECT_NULL(test, shmem->pages);
+ KUNIT_EXPECT_NULL(test, shmem->sgt);
+ KUNIT_EXPECT_EQ(test, shmem->madv, -1);
+}
+
+static int drm_gem_shmem_test_init(struct kunit *test)
+{
+ struct device *dev;
+ struct drm_device *drm_dev;
+
+ /* Allocate a parent device */
+ dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ /*
+ * The DRM core will automatically initialize the GEM core and create
+ * a DRM Memory Manager object which provides an address space pool
+ * for GEM object allocation.
+ */
+ drm_dev = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm_dev),
+ 0, DRIVER_GEM);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm_dev);
+
+ test->priv = drm_dev;
+
+ return 0;
+}
+
+static struct kunit_case drm_gem_shmem_test_cases[] = {
+ KUNIT_CASE(drm_gem_shmem_test_obj_create),
+ KUNIT_CASE(drm_gem_shmem_test_obj_create_private),
+ KUNIT_CASE(drm_gem_shmem_test_pin_pages),
+ KUNIT_CASE(drm_gem_shmem_test_vmap),
+ KUNIT_CASE(drm_gem_shmem_test_get_pages_sgt),
+ KUNIT_CASE(drm_gem_shmem_test_get_sg_table),
+ KUNIT_CASE(drm_gem_shmem_test_madvise),
+ KUNIT_CASE(drm_gem_shmem_test_purge),
+ {}
+};
+
+static struct kunit_suite drm_gem_shmem_suite = {
+ .name = "drm_gem_shmem",
+ .init = drm_gem_shmem_test_init,
+ .test_cases = drm_gem_shmem_test_cases
+};
+
+kunit_test_suite(drm_gem_shmem_suite);
+
+MODULE_LICENSE("GPL");
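Assuming the directory's .kunitconfig enables the new test (as it does for the other DRM KUnit suites), the suite should be runnable under UML with:

    ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/gpu/drm/tests 'drm_gem_shmem'

where the trailing glob narrows the run to this suite; drop it to run all DRM tests.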
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
index 5e5e466f35d1..5f838980c7a1 100644
--- a/drivers/gpu/drm/tidss/tidss_crtc.c
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -169,14 +169,10 @@ static void tidss_crtc_atomic_flush(struct drm_crtc *crtc,
struct tidss_device *tidss = to_tidss(ddev);
unsigned long flags;
- dev_dbg(ddev->dev,
- "%s: %s enabled %d, needs modeset %d, event %p\n", __func__,
- crtc->name, drm_atomic_crtc_needs_modeset(crtc->state),
- crtc->state->enable, crtc->state->event);
-
- /* There is nothing to do if CRTC is not going to be enabled. */
- if (!crtc->state->enable)
- return;
+ dev_dbg(ddev->dev, "%s: %s is %sactive, %s modeset, event %p\n",
+ __func__, crtc->name, crtc->state->active ? "" : "not ",
+ drm_atomic_crtc_needs_modeset(crtc->state) ? "needs" : "doesn't need",
+ crtc->state->event);
/*
* Flush CRTC changes with go bit only if new modeset is not
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index 9d9dee7abaef..1ad711f8d2a8 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -322,6 +322,60 @@ const struct dispc_features dispc_am625_feats = {
.vid_order = { 1, 0 },
};
+const struct dispc_features dispc_am62a7_feats = {
+ /*
+ * if the code reaches dispc_mode_valid with VP1,
+ * it should return MODE_BAD.
+ */
+ .max_pclk_khz = {
+ [DISPC_VP_TIED_OFF] = 0,
+ [DISPC_VP_DPI] = 165000,
+ },
+
+ .scaling = {
+ .in_width_max_5tap_rgb = 1280,
+ .in_width_max_3tap_rgb = 2560,
+ .in_width_max_5tap_yuv = 2560,
+ .in_width_max_3tap_yuv = 4096,
+ .upscale_limit = 16,
+ .downscale_limit_5tap = 4,
+ .downscale_limit_3tap = 2,
+ /*
+ * The max supported pixel inc value is 255. The value
+ * of pixel inc is calculated like this: 1+(xinc-1)*bpp.
+ * The maximum bpp of all formats supported by the HW
+ * is 8. So the maximum supported xinc value is 32,
+ * because 1+(32-1)*8 < 255 < 1+(33-1)*8.
+ */
+ .xinc_max = 32,
+ },
+
+ .subrev = DISPC_AM62A7,
+
+ .common = "common",
+ .common_regs = tidss_am65x_common_regs,
+
+ .num_vps = 2,
+ .vp_name = { "vp1", "vp2" },
+ .ovr_name = { "ovr1", "ovr2" },
+ .vpclk_name = { "vp1", "vp2" },
+ /* VP1 of the DSS in AM62A7 SoC is tied off internally */
+ .vp_bus_type = { DISPC_VP_TIED_OFF, DISPC_VP_DPI },
+
+ .vp_feat = { .color = {
+ .has_ctm = true,
+ .gamma_size = 256,
+ .gamma_type = TIDSS_GAMMA_8BIT,
+ },
+ },
+
+ .num_planes = 2,
+ /* note: vid is plane_id 0 and vidl1 is plane_id 1 */
+ .vid_name = { "vid", "vidl1" },
+ .vid_lite = { false, true, },
+ .vid_order = { 1, 0 },
+};
+
static const u16 *dispc_common_regmap;
struct dss_vp_data {
@@ -824,6 +878,7 @@ dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc)
case DISPC_K2G:
return dispc_k2g_read_and_clear_irqstatus(dispc);
case DISPC_AM625:
+ case DISPC_AM62A7:
case DISPC_AM65X:
case DISPC_J721E:
return dispc_k3_read_and_clear_irqstatus(dispc);
@@ -840,6 +895,7 @@ void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
dispc_k2g_set_irqenable(dispc, mask);
break;
case DISPC_AM625:
+ case DISPC_AM62A7:
case DISPC_AM65X:
case DISPC_J721E:
dispc_k3_set_irqenable(dispc, mask);
@@ -1331,6 +1387,7 @@ void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane,
x, y, layer);
break;
case DISPC_AM625:
+ case DISPC_AM62A7:
case DISPC_AM65X:
dispc_am65x_ovr_set_plane(dispc, hw_plane, hw_videoport,
x, y, layer);
@@ -2250,6 +2307,7 @@ static void dispc_plane_init(struct dispc_device *dispc)
dispc_k2g_plane_init(dispc);
break;
case DISPC_AM625:
+ case DISPC_AM62A7:
case DISPC_AM65X:
case DISPC_J721E:
dispc_k3_plane_init(dispc);
@@ -2357,6 +2415,7 @@ static void dispc_vp_write_gamma_table(struct dispc_device *dispc,
dispc_k2g_vp_write_gamma_table(dispc, hw_videoport);
break;
case DISPC_AM625:
+ case DISPC_AM62A7:
case DISPC_AM65X:
dispc_am65x_vp_write_gamma_table(dispc, hw_videoport);
break;
@@ -2702,18 +2761,83 @@ static void dispc_init_errata(struct dispc_device *dispc)
}
}
-static void dispc_softreset(struct dispc_device *dispc)
+/*
+ * K2G display controller does not support soft reset, so we do a basic manual
+ * reset here: make sure the IRQs are masked and VPs are disabled.
+ */
+static void dispc_softreset_k2g(struct dispc_device *dispc)
+{
+ dispc_set_irqenable(dispc, 0);
+ dispc_read_and_clear_irqstatus(dispc);
+
+ for (unsigned int vp_idx = 0; vp_idx < dispc->feat->num_vps; ++vp_idx)
+ VP_REG_FLD_MOD(dispc, vp_idx, DISPC_VP_CONTROL, 0, 0, 0);
+}
+
+static int dispc_softreset(struct dispc_device *dispc)
{
u32 val;
- int ret = 0;
+ int ret;
+
+ if (dispc->feat->subrev == DISPC_K2G) {
+ dispc_softreset_k2g(dispc);
+ return 0;
+ }
/* Soft reset */
REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, 1, 1);
/* Wait for reset to complete */
ret = readl_poll_timeout(dispc->base_common + DSS_SYSSTATUS,
val, val & 1, 100, 5000);
+ if (ret) {
+ dev_err(dispc->dev, "failed to reset dispc\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dispc_init_hw(struct dispc_device *dispc)
+{
+ struct device *dev = dispc->dev;
+ int ret;
+
+ ret = pm_runtime_set_active(dev);
+ if (ret) {
+ dev_err(dev, "Failed to set DSS PM to active\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dispc->fclk);
+ if (ret) {
+ dev_err(dev, "Failed to enable DSS fclk\n");
+ goto err_runtime_suspend;
+ }
+
+ ret = dispc_softreset(dispc);
if (ret)
- dev_warn(dispc->dev, "failed to reset dispc\n");
+ goto err_clk_disable;
+
+ clk_disable_unprepare(dispc->fclk);
+ ret = pm_runtime_set_suspended(dev);
+ if (ret) {
+ dev_err(dev, "Failed to set DSS PM to suspended\n");
+ return ret;
+ }
+
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(dispc->fclk);
+
+err_runtime_suspend:
+ ret = pm_runtime_set_suspended(dev);
+ if (ret) {
+ dev_err(dev, "Failed to set DSS PM to suspended\n");
+ return ret;
+ }
+
+ return ret;
}
int dispc_init(struct tidss_device *tidss)
@@ -2777,10 +2901,6 @@ int dispc_init(struct tidss_device *tidss)
return r;
}
- /* K2G display controller does not support soft reset */
- if (feat->subrev != DISPC_K2G)
- dispc_softreset(dispc);
-
for (i = 0; i < dispc->feat->num_vps; i++) {
u32 gamma_size = dispc->feat->vp_feat.color.gamma_size;
u32 *gamma_table;
@@ -2829,6 +2949,10 @@ int dispc_init(struct tidss_device *tidss)
of_property_read_u32(dispc->dev->of_node, "max-memory-bandwidth",
&dispc->memory_bandwidth_limit);
+ r = dispc_init_hw(dispc);
+ if (r)
+ return r;
+
tidss->dispc = dispc;
return 0;
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h
index 33ac5ad7a423..086327d51a90 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc.h
@@ -54,12 +54,14 @@ enum dispc_vp_bus_type {
DISPC_VP_DPI, /* DPI output */
DISPC_VP_OLDI, /* OLDI (LVDS) output */
DISPC_VP_INTERNAL, /* SoC internal routing */
+ DISPC_VP_TIED_OFF, /* Tied off / Unavailable */
DISPC_VP_MAX_BUS_TYPE,
};
enum dispc_dss_subrevision {
DISPC_K2G,
DISPC_AM625,
+ DISPC_AM62A7,
DISPC_AM65X,
DISPC_J721E,
};
@@ -88,6 +90,7 @@ struct dispc_features {
extern const struct dispc_features dispc_k2g_feats;
extern const struct dispc_features dispc_am625_feats;
+extern const struct dispc_features dispc_am62a7_feats;
extern const struct dispc_features dispc_am65x_feats;
extern const struct dispc_features dispc_j721e_feats;
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index 4d063eb9cd0b..d15f836dca95 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -32,9 +32,9 @@ int tidss_runtime_get(struct tidss_device *tidss)
dev_dbg(tidss->dev, "%s\n", __func__);
- r = pm_runtime_get_sync(tidss->dev);
+ r = pm_runtime_resume_and_get(tidss->dev);
WARN_ON(r < 0);
- return r < 0 ? r : 0;
+ return r;
}
void tidss_runtime_put(struct tidss_device *tidss)
@@ -43,7 +43,9 @@ void tidss_runtime_put(struct tidss_device *tidss)
dev_dbg(tidss->dev, "%s\n", __func__);
- r = pm_runtime_put_sync(tidss->dev);
+ pm_runtime_mark_last_busy(tidss->dev);
+
+ r = pm_runtime_put_autosuspend(tidss->dev);
WARN_ON(r < 0);
}
@@ -136,6 +138,8 @@ static int tidss_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tidss);
+ spin_lock_init(&tidss->wait_lock);
+
ret = dispc_init(tidss);
if (ret) {
dev_err(dev, "failed to initialize dispc: %d\n", ret);
@@ -144,6 +148,9 @@ static int tidss_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_use_autosuspend(dev);
+
#ifndef CONFIG_PM
/* If we don't have PM, we need to call resume manually */
dispc_runtime_resume(tidss->dispc);
@@ -192,6 +199,7 @@ err_runtime_suspend:
#ifndef CONFIG_PM
dispc_runtime_suspend(tidss->dispc);
#endif
+ pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
return ret;
@@ -215,6 +223,7 @@ static void tidss_remove(struct platform_device *pdev)
/* If we don't have PM, we need to call suspend manually */
dispc_runtime_suspend(tidss->dispc);
#endif
+ pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
/* devm allocated dispc goes away with the dev so mark it NULL */
@@ -231,6 +240,7 @@ static void tidss_shutdown(struct platform_device *pdev)
static const struct of_device_id tidss_of_table[] = {
{ .compatible = "ti,k2g-dss", .data = &dispc_k2g_feats, },
{ .compatible = "ti,am625-dss", .data = &dispc_am625_feats, },
+ { .compatible = "ti,am62a7-dss", .data = &dispc_am62a7_feats, },
{ .compatible = "ti,am65x-dss", .data = &dispc_am65x_feats, },
{ .compatible = "ti,j721e-dss", .data = &dispc_j721e_feats, },
{ }
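The tidss_drv.c hunks combine two runtime-PM idioms: pm_runtime_resume_and_get(), which drops the usage count on failure so the old `r < 0 ? r : 0` dance goes away, and autosuspend with a 1000 ms delay so back-to-back commits do not bounce the power domain. A minimal sketch of the trio, assuming illustrative my_* names:

    #include <linux/pm_runtime.h>

    static int my_runtime_get(struct device *dev)
    {
            /* Resume now; on failure the reference is dropped for us. */
            return pm_runtime_resume_and_get(dev);
    }

    static void my_runtime_put(struct device *dev)
    {
            pm_runtime_mark_last_busy(dev);   /* restart the idle timer */
            pm_runtime_put_autosuspend(dev);  /* suspend after the delay */
    }

    static void my_enable_autosuspend(struct device *dev)
    {
            pm_runtime_enable(dev);
            pm_runtime_set_autosuspend_delay(dev, 1000);    /* ms */
            pm_runtime_use_autosuspend(dev);
    }

The matching pm_runtime_dont_use_autosuspend() calls in the error and remove paths undo the last step, as the hunks above show.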
diff --git a/drivers/gpu/drm/tidss/tidss_irq.c b/drivers/gpu/drm/tidss/tidss_irq.c
index 0c681c7600bc..604334ef526a 100644
--- a/drivers/gpu/drm/tidss/tidss_irq.c
+++ b/drivers/gpu/drm/tidss/tidss_irq.c
@@ -93,33 +93,21 @@ void tidss_irq_resume(struct tidss_device *tidss)
spin_unlock_irqrestore(&tidss->wait_lock, flags);
}
-static void tidss_irq_preinstall(struct drm_device *ddev)
-{
- struct tidss_device *tidss = to_tidss(ddev);
-
- spin_lock_init(&tidss->wait_lock);
-
- tidss_runtime_get(tidss);
-
- dispc_set_irqenable(tidss->dispc, 0);
- dispc_read_and_clear_irqstatus(tidss->dispc);
-
- tidss_runtime_put(tidss);
-}
-
-static void tidss_irq_postinstall(struct drm_device *ddev)
+int tidss_irq_install(struct drm_device *ddev, unsigned int irq)
{
struct tidss_device *tidss = to_tidss(ddev);
- unsigned long flags;
- unsigned int i;
+ int ret;
- tidss_runtime_get(tidss);
+ if (irq == IRQ_NOTCONNECTED)
+ return -ENOTCONN;
- spin_lock_irqsave(&tidss->wait_lock, flags);
+ ret = request_irq(irq, tidss_irq_handler, 0, ddev->driver->name, ddev);
+ if (ret)
+ return ret;
tidss->irq_mask = DSS_IRQ_DEVICE_OCP_ERR;
- for (i = 0; i < tidss->num_crtcs; ++i) {
+ for (unsigned int i = 0; i < tidss->num_crtcs; ++i) {
struct tidss_crtc *tcrtc = to_tidss_crtc(tidss->crtcs[i]);
tidss->irq_mask |= DSS_IRQ_VP_SYNC_LOST(tcrtc->hw_videoport);
@@ -127,28 +115,6 @@ static void tidss_irq_postinstall(struct drm_device *ddev)
tidss->irq_mask |= DSS_IRQ_VP_FRAME_DONE(tcrtc->hw_videoport);
}
- tidss_irq_update(tidss);
-
- spin_unlock_irqrestore(&tidss->wait_lock, flags);
-
- tidss_runtime_put(tidss);
-}
-
-int tidss_irq_install(struct drm_device *ddev, unsigned int irq)
-{
- int ret;
-
- if (irq == IRQ_NOTCONNECTED)
- return -ENOTCONN;
-
- tidss_irq_preinstall(ddev);
-
- ret = request_irq(irq, tidss_irq_handler, 0, ddev->driver->name, ddev);
- if (ret)
- return ret;
-
- tidss_irq_postinstall(ddev);
-
return 0;
}
@@ -156,9 +122,5 @@ void tidss_irq_uninstall(struct drm_device *ddev)
{
struct tidss_device *tidss = to_tidss(ddev);
- tidss_runtime_get(tidss);
- dispc_set_irqenable(tidss->dispc, 0);
- tidss_runtime_put(tidss);
-
free_irq(tidss->irq, ddev);
}
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index d096d8d2bc8f..a0e494c806a9 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -29,7 +29,7 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
tidss_runtime_get(tidss);
drm_atomic_helper_commit_modeset_disables(ddev, old_state);
- drm_atomic_helper_commit_planes(ddev, old_state, 0);
+ drm_atomic_helper_commit_planes(ddev, old_state, DRM_PLANE_COMMIT_ACTIVE_ONLY);
drm_atomic_helper_commit_modeset_enables(ddev, old_state);
drm_atomic_helper_commit_hw_done(old_state);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 2f6eaac7f659..23bf16f596f6 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -570,19 +570,18 @@ static int tilcdc_pdev_probe(struct platform_device *pdev)
match);
}
-static int tilcdc_pdev_remove(struct platform_device *pdev)
+static void tilcdc_pdev_remove(struct platform_device *pdev)
{
int ret;
ret = tilcdc_get_external_components(&pdev->dev, NULL);
if (ret < 0)
- return ret;
+ dev_err(&pdev->dev, "tilcdc_get_external_components() failed (%pe)\n",
+ ERR_PTR(ret));
else if (ret == 0)
tilcdc_fini(platform_get_drvdata(pdev));
else
component_master_del(&pdev->dev, &tilcdc_comp_ops);
-
- return 0;
}
static void tilcdc_pdev_shutdown(struct platform_device *pdev)
@@ -599,7 +598,7 @@ MODULE_DEVICE_TABLE(of, tilcdc_of_match);
static struct platform_driver tilcdc_platform_driver = {
.probe = tilcdc_pdev_probe,
- .remove = tilcdc_pdev_remove,
+ .remove_new = tilcdc_pdev_remove,
.shutdown = tilcdc_pdev_shutdown,
.driver = {
.name = "tilcdc",
diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/tiny/ofdrm.c
index 05a72473cfc6..ab89b7fc7bf6 100644
--- a/drivers/gpu/drm/tiny/ofdrm.c
+++ b/drivers/gpu/drm/tiny/ofdrm.c
@@ -19,7 +19,6 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index 34bbbd7b53dd..7ce1c4617675 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -25,7 +25,6 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#define DRIVER_NAME "simpledrm"
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index e58b7e249816..edf10618fe2b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -370,7 +370,13 @@ static void ttm_bo_release(struct kref *kref)
spin_unlock(&bo->bdev->lru_lock);
INIT_WORK(&bo->delayed_delete, ttm_bo_delayed_delete);
- queue_work(bdev->wq, &bo->delayed_delete);
+
+ /* Schedule the worker on the closest NUMA node. This
+ * improves performance since system memory might be
+ * cleared on free and that is best done on a CPU core
+ * close to it.
+ */
+ queue_work_node(bdev->pool.nid, bdev->wq, &bo->delayed_delete);
return;
}
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index d48b39132b32..f5187b384ae9 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -204,7 +204,8 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
if (ret)
return ret;
- bdev->wq = alloc_workqueue("ttm", WQ_MEM_RECLAIM | WQ_HIGHPRI, 16);
+ bdev->wq = alloc_workqueue("ttm",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 16);
if (!bdev->wq) {
ttm_global_release();
return -ENOMEM;
@@ -213,7 +214,8 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
bdev->funcs = funcs;
ttm_sys_man_init(bdev);
- ttm_pool_init(&bdev->pool, dev, NUMA_NO_NODE, use_dma_alloc, use_dma32);
+
+ ttm_pool_init(&bdev->pool, dev, dev_to_node(dev), use_dma_alloc, use_dma32);
bdev->vma_manager = vma_manager;
spin_lock_init(&bdev->lru_lock);
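The two ttm hunks work as a pair: the page pool is initialized on the device's NUMA node, and the delayed-delete work is queued on that same node. The node hint only matters for unbound workqueues, hence the added WQ_UNBOUND flag. A small sketch of the idiom, assuming illustrative names:

    #include <linux/device.h>
    #include <linux/workqueue.h>

    static void my_schedule_free(struct device *dev,
                                 struct workqueue_struct *wq,
                                 struct work_struct *work)
    {
            int nid = dev_to_node(dev);     /* may be NUMA_NO_NODE */

            /* Run the free worker near the memory it will clear; with
             * NUMA_NO_NODE this degrades to an arbitrary CPU.
             */
            queue_work_node(nid, wq, work);
    }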
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 40876bcdd79a..7702359c90c2 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -21,7 +21,6 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -261,6 +260,22 @@ static const uint64_t udl_primary_plane_fmtmods[] = {
DRM_FORMAT_MOD_INVALID
};
+static int udl_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
+
+ return drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+}
+
static void udl_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -296,7 +311,7 @@ out_drm_gem_fb_end_cpu_access:
static const struct drm_plane_helper_funcs udl_primary_plane_helper_funcs = {
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
- .atomic_check = drm_plane_helper_atomic_check,
+ .atomic_check = udl_primary_plane_helper_atomic_check,
.atomic_update = udl_primary_plane_helper_atomic_update,
};
diff --git a/drivers/gpu/drm/v3d/Makefile b/drivers/gpu/drm/v3d/Makefile
index 4b21b20e4998..b7d673f1153b 100644
--- a/drivers/gpu/drm/v3d/Makefile
+++ b/drivers/gpu/drm/v3d/Makefile
@@ -12,7 +12,8 @@ v3d-y := \
v3d_perfmon.o \
v3d_trace_points.o \
v3d_sched.o \
- v3d_sysfs.o
+ v3d_sysfs.o \
+ v3d_submit.o
v3d-$(CONFIG_DEBUG_FS) += v3d_debugfs.o
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index 8b3229a37c6d..1bdfac8beafd 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -33,6 +33,9 @@ void v3d_free_object(struct drm_gem_object *obj)
struct v3d_dev *v3d = to_v3d_dev(obj->dev);
struct v3d_bo *bo = to_v3d_bo(obj);
+ if (bo->vaddr)
+ v3d_put_bo_vaddr(bo);
+
v3d_mmu_remove_ptes(bo);
mutex_lock(&v3d->bo_lock);
@@ -134,6 +137,7 @@ struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
if (IS_ERR(shmem_obj))
return ERR_CAST(shmem_obj);
bo = to_v3d_bo(&shmem_obj->base);
+ bo->vaddr = NULL;
ret = v3d_bo_create_finish(&shmem_obj->base);
if (ret)
@@ -167,6 +171,20 @@ v3d_prime_import_sg_table(struct drm_device *dev,
return obj;
}
+void v3d_get_bo_vaddr(struct v3d_bo *bo)
+{
+ struct drm_gem_shmem_object *obj = &bo->base;
+
+ bo->vaddr = vmap(obj->pages, obj->base.size >> PAGE_SHIFT, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+}
+
+void v3d_put_bo_vaddr(struct v3d_bo *bo)
+{
+ vunmap(bo->vaddr);
+ bo->vaddr = NULL;
+}
+
int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -233,3 +251,36 @@ int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
drm_gem_object_put(gem_obj);
return 0;
}
+
+int
+v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ int ret;
+ struct drm_v3d_wait_bo *args = data;
+ ktime_t start = ktime_get();
+ u64 delta_ns;
+ unsigned long timeout_jiffies =
+ nsecs_to_jiffies_timeout(args->timeout_ns);
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ ret = drm_gem_dma_resv_wait(file_priv, args->handle,
+ true, timeout_jiffies);
+
+ /* Decrement the user's timeout, in case we got interrupted
+ * such that the ioctl will be restarted.
+ */
+ delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
+ if (delta_ns < args->timeout_ns)
+ args->timeout_ns -= delta_ns;
+ else
+ args->timeout_ns = 0;
+
+ /* Asked to wait beyond the jiffie/scheduler precision? */
+ if (ret == -ETIME && args->timeout_ns)
+ ret = -EAGAIN;
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 44a1ca57d6a4..3debf37e7d9b 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -91,6 +91,9 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
case DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT:
args->value = 1;
return 0;
+ case DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE:
+ args->value = 1;
+ return 0;
default:
DRM_DEBUG("Unknown parameter %d\n", args->param);
return -EINVAL;
@@ -189,6 +192,7 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(V3D_PERFMON_CREATE, v3d_perfmon_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(V3D_PERFMON_DESTROY, v3d_perfmon_destroy_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(V3D_PERFMON_GET_VALUES, v3d_perfmon_get_values_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CPU, v3d_submit_cpu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
};
static const struct drm_driver v3d_drm_driver = {
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 4c59fefaa0b4..3c7d58866570 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -19,7 +19,7 @@ struct reset_control;
#define GMP_GRANULARITY (128 * 1024)
-#define V3D_MAX_QUEUES (V3D_CACHE_CLEAN + 1)
+#define V3D_MAX_QUEUES (V3D_CPU + 1)
static inline char *v3d_queue_to_string(enum v3d_queue queue)
{
@@ -29,6 +29,7 @@ static inline char *v3d_queue_to_string(enum v3d_queue queue)
case V3D_TFU: return "tfu";
case V3D_CSD: return "csd";
case V3D_CACHE_CLEAN: return "cache_clean";
+ case V3D_CPU: return "cpu";
}
return "UNKNOWN";
}
@@ -122,6 +123,7 @@ struct v3d_dev {
struct v3d_render_job *render_job;
struct v3d_tfu_job *tfu_job;
struct v3d_csd_job *csd_job;
+ struct v3d_cpu_job *cpu_job;
struct v3d_queue_state queue[V3D_MAX_QUEUES];
@@ -200,6 +202,8 @@ struct v3d_bo {
* v3d_render_job->unref_list
*/
struct list_head unref_head;
+
+ void *vaddr;
};
static inline struct v3d_bo *
@@ -312,6 +316,112 @@ struct v3d_csd_job {
struct drm_v3d_submit_csd args;
};
+enum v3d_cpu_job_type {
+ V3D_CPU_JOB_TYPE_INDIRECT_CSD = 1,
+ V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY,
+ V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY,
+ V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY,
+ V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY,
+ V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY,
+};
+
+struct v3d_timestamp_query {
+ /* Offset of this query in the timestamp BO for its value. */
+ u32 offset;
+
+ /* Syncobj that indicates the timestamp availability */
+ struct drm_syncobj *syncobj;
+};
+
+/* Number of perfmons required to handle all supported performance counters */
+#define V3D_MAX_PERFMONS DIV_ROUND_UP(V3D_PERFCNT_NUM, \
+ DRM_V3D_MAX_PERF_COUNTERS)
+
+struct v3d_performance_query {
+ /* Performance monitor IDs for this query */
+ u32 kperfmon_ids[V3D_MAX_PERFMONS];
+
+ /* Syncobj that indicates the query availability */
+ struct drm_syncobj *syncobj;
+};
+
+struct v3d_indirect_csd_info {
+ /* Indirect CSD */
+ struct v3d_csd_job *job;
+
+ /* Clean cache job associated to the Indirect CSD job */
+ struct v3d_job *clean_job;
+
+ /* Offset within the BO where the workgroup counts are stored */
+ u32 offset;
+
+ /* Workgroup size */
+ u32 wg_size;
+
+ /* Indices of the uniforms with the workgroup dispatch counts
+ * in the uniform stream.
+ */
+ u32 wg_uniform_offsets[3];
+
+ /* Indirect BO */
+ struct drm_gem_object *indirect;
+
+ /* Context of the Indirect CSD job */
+ struct ww_acquire_ctx acquire_ctx;
+};
+
+struct v3d_timestamp_query_info {
+ struct v3d_timestamp_query *queries;
+
+ u32 count;
+};
+
+struct v3d_performance_query_info {
+ struct v3d_performance_query *queries;
+
+ /* Number of performance queries */
+ u32 count;
+
+ /* Number of performance monitors related to that query pool */
+ u32 nperfmons;
+
+ /* Number of performance counters related to that query pool */
+ u32 ncounters;
+};
+
+struct v3d_copy_query_results_info {
+ /* Defines whether to write to the buffer using 64 or 32 bits */
+ bool do_64bit;
+
+ /* Defines whether the buffer can be written even if the query is not available */
+ bool do_partial;
+
+ /* Defines whether the availability bit should be written to the buffer */
+ bool availability_bit;
+
+ /* Offset of the copy buffer in the BO */
+ u32 offset;
+
+ /* Stride of the copy buffer in the BO */
+ u32 stride;
+};
+
+struct v3d_cpu_job {
+ struct v3d_job base;
+
+ enum v3d_cpu_job_type job_type;
+
+ struct v3d_indirect_csd_info indirect_csd;
+
+ struct v3d_timestamp_query_info timestamp_query;
+
+ struct v3d_copy_query_results_info copy;
+
+ struct v3d_performance_query_info performance_query;
+};
+
+typedef void (*v3d_cpu_job_fn)(struct v3d_cpu_job *);
+
struct v3d_submit_outsync {
struct drm_syncobj *syncobj;
};
@@ -379,12 +489,16 @@ struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size);
void v3d_free_object(struct drm_gem_object *gem_obj);
struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
size_t size);
+void v3d_get_bo_vaddr(struct v3d_bo *bo);
+void v3d_put_bo_vaddr(struct v3d_bo *bo);
int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
@@ -399,19 +513,21 @@ struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);
/* v3d_gem.c */
int v3d_gem_init(struct drm_device *dev);
void v3d_gem_destroy(struct drm_device *dev);
+void v3d_reset(struct v3d_dev *v3d);
+void v3d_invalidate_caches(struct v3d_dev *v3d);
+void v3d_clean_caches(struct v3d_dev *v3d);
+
+/* v3d_submit.c */
+void v3d_job_cleanup(struct v3d_job *job);
+void v3d_job_put(struct v3d_job *job);
int v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-void v3d_job_cleanup(struct v3d_job *job);
-void v3d_job_put(struct v3d_job *job);
-void v3d_reset(struct v3d_dev *v3d);
-void v3d_invalidate_caches(struct v3d_dev *v3d);
-void v3d_clean_caches(struct v3d_dev *v3d);
+int v3d_submit_cpu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* v3d_irq.c */
int v3d_irq_init(struct v3d_dev *v3d);
@@ -420,8 +536,6 @@ void v3d_irq_disable(struct v3d_dev *v3d);
void v3d_irq_reset(struct v3d_dev *v3d);
/* v3d_mmu.c */
-int v3d_mmu_get_offset(struct drm_file *file_priv, struct v3d_bo *bo,
- u32 *offset);
int v3d_mmu_set_page_table(struct v3d_dev *v3d);
void v3d_mmu_insert_ptes(struct v3d_bo *bo);
void v3d_mmu_remove_ptes(struct v3d_bo *bo);
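The v3d_cpu_job_fn typedef and the job_type enum above lend themselves to a dispatch table in the new v3d_submit.c backend. The sketch below is hypothetical (the my_* handlers are placeholders, not the driver's actual functions) but shows the intended pairing:

    static void my_run_indirect_csd(struct v3d_cpu_job *job) {}
    static void my_write_timestamps(struct v3d_cpu_job *job) {}

    static const v3d_cpu_job_fn my_cpu_job_fns[] = {
            [V3D_CPU_JOB_TYPE_INDIRECT_CSD]    = my_run_indirect_csd,
            [V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = my_write_timestamps,
            /* ... one entry per enum v3d_cpu_job_type value ... */
    };

    static void my_run_cpu_job(struct v3d_cpu_job *job)
    {
            if (job->job_type < ARRAY_SIZE(my_cpu_job_fns) &&
                my_cpu_job_fns[job->job_type])
                    my_cpu_job_fns[job->job_type](job);
    }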
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 9d2ac23c29e3..afc565078c78 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -11,8 +11,6 @@
#include <linux/uaccess.h>
#include <drm/drm_managed.h>
-#include <drm/drm_syncobj.h>
-#include <uapi/drm/v3d_drm.h>
#include "v3d_drv.h"
#include "v3d_regs.h"
@@ -241,772 +239,6 @@ v3d_invalidate_caches(struct v3d_dev *v3d)
v3d_invalidate_slices(v3d, 0);
}
-/* Takes the reservation lock on all the BOs being referenced, so that
- * at queue submit time we can update the reservations.
- *
- * We don't lock the RCL the tile alloc/state BOs, or overflow memory
- * (all of which are on exec->unref_list). They're entirely private
- * to v3d, so we don't attach dma-buf fences to them.
- */
-static int
-v3d_lock_bo_reservations(struct v3d_job *job,
- struct ww_acquire_ctx *acquire_ctx)
-{
- int i, ret;
-
- ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
- if (ret)
- return ret;
-
- for (i = 0; i < job->bo_count; i++) {
- ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
- if (ret)
- goto fail;
-
- ret = drm_sched_job_add_implicit_dependencies(&job->base,
- job->bo[i], true);
- if (ret)
- goto fail;
- }
-
- return 0;
-
-fail:
- drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
- return ret;
-}
-
-/**
- * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
- * referenced by the job.
- * @dev: DRM device
- * @file_priv: DRM file for this fd
- * @job: V3D job being set up
- * @bo_handles: GEM handles
- * @bo_count: Number of GEM handles passed in
- *
- * The command validator needs to reference BOs by their index within
- * the submitted job's BO list. This does the validation of the job's
- * BO list and reference counting for the lifetime of the job.
- *
- * Note that this function doesn't need to unreference the BOs on
- * failure, because that will happen at v3d_exec_cleanup() time.
- */
-static int
-v3d_lookup_bos(struct drm_device *dev,
- struct drm_file *file_priv,
- struct v3d_job *job,
- u64 bo_handles,
- u32 bo_count)
-{
- job->bo_count = bo_count;
-
- if (!job->bo_count) {
- /* See comment on bo_index for why we have to check
- * this.
- */
- DRM_DEBUG("Rendering requires BOs\n");
- return -EINVAL;
- }
-
- return drm_gem_objects_lookup(file_priv,
- (void __user *)(uintptr_t)bo_handles,
- job->bo_count, &job->bo);
-}
-
-static void
-v3d_job_free(struct kref *ref)
-{
- struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
- int i;
-
- if (job->bo) {
- for (i = 0; i < job->bo_count; i++)
- drm_gem_object_put(job->bo[i]);
- kvfree(job->bo);
- }
-
- dma_fence_put(job->irq_fence);
- dma_fence_put(job->done_fence);
-
- if (job->perfmon)
- v3d_perfmon_put(job->perfmon);
-
- kfree(job);
-}
-
-static void
-v3d_render_job_free(struct kref *ref)
-{
- struct v3d_render_job *job = container_of(ref, struct v3d_render_job,
- base.refcount);
- struct v3d_bo *bo, *save;
-
- list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) {
- drm_gem_object_put(&bo->base.base);
- }
-
- v3d_job_free(ref);
-}
-
-void v3d_job_cleanup(struct v3d_job *job)
-{
- if (!job)
- return;
-
- drm_sched_job_cleanup(&job->base);
- v3d_job_put(job);
-}
-
-void v3d_job_put(struct v3d_job *job)
-{
- kref_put(&job->refcount, job->free);
-}
-
-int
-v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int ret;
- struct drm_v3d_wait_bo *args = data;
- ktime_t start = ktime_get();
- u64 delta_ns;
- unsigned long timeout_jiffies =
- nsecs_to_jiffies_timeout(args->timeout_ns);
-
- if (args->pad != 0)
- return -EINVAL;
-
- ret = drm_gem_dma_resv_wait(file_priv, args->handle,
- true, timeout_jiffies);
-
- /* Decrement the user's timeout, in case we got interrupted
- * such that the ioctl will be restarted.
- */
- delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
- if (delta_ns < args->timeout_ns)
- args->timeout_ns -= delta_ns;
- else
- args->timeout_ns = 0;
-
- /* Asked to wait beyond the jiffie/scheduler precision? */
- if (ret == -ETIME && args->timeout_ns)
- ret = -EAGAIN;
-
- return ret;
-}
-
-static int
-v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
- void **container, size_t size, void (*free)(struct kref *ref),
- u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue)
-{
- struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
- struct v3d_job *job;
- bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
- int ret, i;
-
- *container = kcalloc(1, size, GFP_KERNEL);
- if (!*container) {
- DRM_ERROR("Cannot allocate memory for v3d job.");
- return -ENOMEM;
- }
-
- job = *container;
- job->v3d = v3d;
- job->free = free;
- job->file = file_priv;
-
- ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
- 1, v3d_priv);
- if (ret)
- goto fail;
-
- if (has_multisync) {
- if (se->in_sync_count && se->wait_stage == queue) {
- struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs);
-
- for (i = 0; i < se->in_sync_count; i++) {
- struct drm_v3d_sem in;
-
- if (copy_from_user(&in, handle++, sizeof(in))) {
- ret = -EFAULT;
- DRM_DEBUG("Failed to copy wait dep handle.\n");
- goto fail_deps;
- }
- ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in.handle, 0);
-
- // TODO: Investigate why this was filtered out for the IOCTL.
- if (ret && ret != -ENOENT)
- goto fail_deps;
- }
- }
- } else {
- ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in_sync, 0);
-
- // TODO: Investigate why this was filtered out for the IOCTL.
- if (ret && ret != -ENOENT)
- goto fail_deps;
- }
-
- kref_init(&job->refcount);
-
- return 0;
-
-fail_deps:
- drm_sched_job_cleanup(&job->base);
-fail:
- kfree(*container);
- *container = NULL;
-
- return ret;
-}
-
-static void
-v3d_push_job(struct v3d_job *job)
-{
- drm_sched_job_arm(&job->base);
-
- job->done_fence = dma_fence_get(&job->base.s_fence->finished);
-
- /* put by scheduler job completion */
- kref_get(&job->refcount);
-
- drm_sched_entity_push_job(&job->base);
-}
-
-static void
-v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
- struct v3d_job *job,
- struct ww_acquire_ctx *acquire_ctx,
- u32 out_sync,
- struct v3d_submit_ext *se,
- struct dma_fence *done_fence)
-{
- struct drm_syncobj *sync_out;
- bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
- int i;
-
- for (i = 0; i < job->bo_count; i++) {
- /* XXX: Use shared fences for read-only objects. */
- dma_resv_add_fence(job->bo[i]->resv, job->done_fence,
- DMA_RESV_USAGE_WRITE);
- }
-
- drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
-
- /* Update the return sync object for the job */
- /* If it only supports a single signal semaphore*/
- if (!has_multisync) {
- sync_out = drm_syncobj_find(file_priv, out_sync);
- if (sync_out) {
- drm_syncobj_replace_fence(sync_out, done_fence);
- drm_syncobj_put(sync_out);
- }
- return;
- }
-
- /* If multiple semaphores extension is supported */
- if (se->out_sync_count) {
- for (i = 0; i < se->out_sync_count; i++) {
- drm_syncobj_replace_fence(se->out_syncs[i].syncobj,
- done_fence);
- drm_syncobj_put(se->out_syncs[i].syncobj);
- }
- kvfree(se->out_syncs);
- }
-}
-
-static void
-v3d_put_multisync_post_deps(struct v3d_submit_ext *se)
-{
- unsigned int i;
-
- if (!(se && se->out_sync_count))
- return;
-
- for (i = 0; i < se->out_sync_count; i++)
- drm_syncobj_put(se->out_syncs[i].syncobj);
- kvfree(se->out_syncs);
-}
-
-static int
-v3d_get_multisync_post_deps(struct drm_file *file_priv,
- struct v3d_submit_ext *se,
- u32 count, u64 handles)
-{
- struct drm_v3d_sem __user *post_deps;
- int i, ret;
-
- if (!count)
- return 0;
-
- se->out_syncs = (struct v3d_submit_outsync *)
- kvmalloc_array(count,
- sizeof(struct v3d_submit_outsync),
- GFP_KERNEL);
- if (!se->out_syncs)
- return -ENOMEM;
-
- post_deps = u64_to_user_ptr(handles);
-
- for (i = 0; i < count; i++) {
- struct drm_v3d_sem out;
-
- if (copy_from_user(&out, post_deps++, sizeof(out))) {
- ret = -EFAULT;
- DRM_DEBUG("Failed to copy post dep handles\n");
- goto fail;
- }
-
- se->out_syncs[i].syncobj = drm_syncobj_find(file_priv,
- out.handle);
- if (!se->out_syncs[i].syncobj) {
- ret = -EINVAL;
- goto fail;
- }
- }
- se->out_sync_count = count;
-
- return 0;
-
-fail:
- for (i--; i >= 0; i--)
- drm_syncobj_put(se->out_syncs[i].syncobj);
- kvfree(se->out_syncs);
-
- return ret;
-}
-
-/* Get data for multiple binary semaphores synchronization. Parse syncobj
- * to be signaled when job completes (out_sync).
- */
-static int
-v3d_get_multisync_submit_deps(struct drm_file *file_priv,
- struct drm_v3d_extension __user *ext,
- void *data)
-{
- struct drm_v3d_multi_sync multisync;
- struct v3d_submit_ext *se = data;
- int ret;
-
- if (copy_from_user(&multisync, ext, sizeof(multisync)))
- return -EFAULT;
-
- if (multisync.pad)
- return -EINVAL;
-
- ret = v3d_get_multisync_post_deps(file_priv, data, multisync.out_sync_count,
- multisync.out_syncs);
- if (ret)
- return ret;
-
- se->in_sync_count = multisync.in_sync_count;
- se->in_syncs = multisync.in_syncs;
- se->flags |= DRM_V3D_EXT_ID_MULTI_SYNC;
- se->wait_stage = multisync.wait_stage;
-
- return 0;
-}
-
-/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data
- * according to the extension id (name).
- */
-static int
-v3d_get_extensions(struct drm_file *file_priv,
- u64 ext_handles,
- void *data)
-{
- struct drm_v3d_extension __user *user_ext;
- int ret;
-
- user_ext = u64_to_user_ptr(ext_handles);
- while (user_ext) {
- struct drm_v3d_extension ext;
-
- if (copy_from_user(&ext, user_ext, sizeof(ext))) {
- DRM_DEBUG("Failed to copy submit extension\n");
- return -EFAULT;
- }
-
- switch (ext.id) {
- case DRM_V3D_EXT_ID_MULTI_SYNC:
- ret = v3d_get_multisync_submit_deps(file_priv, user_ext, data);
- if (ret)
- return ret;
- break;
- default:
- DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id);
- return -EINVAL;
- }
-
- user_ext = u64_to_user_ptr(ext.next);
- }
-
- return 0;
-}
-
-/**
- * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
- * @dev: DRM device
- * @data: ioctl argument
- * @file_priv: DRM file for this fd
- *
- * This is the main entrypoint for userspace to submit a 3D frame to
- * the GPU. Userspace provides the binner command list (if
- * applicable), and the kernel sets up the render command list to draw
- * to the framebuffer described in the ioctl, using the command lists
- * that the 3D engine's binner will produce.
- */
-int
-v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct v3d_dev *v3d = to_v3d_dev(dev);
- struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
- struct drm_v3d_submit_cl *args = data;
- struct v3d_submit_ext se = {0};
- struct v3d_bin_job *bin = NULL;
- struct v3d_render_job *render = NULL;
- struct v3d_job *clean_job = NULL;
- struct v3d_job *last_job;
- struct ww_acquire_ctx acquire_ctx;
- int ret = 0;
-
- trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
-
- if (args->pad)
- return -EINVAL;
-
- if (args->flags &&
- args->flags & ~(DRM_V3D_SUBMIT_CL_FLUSH_CACHE |
- DRM_V3D_SUBMIT_EXTENSION)) {
- DRM_INFO("invalid flags: %d\n", args->flags);
- return -EINVAL;
- }
-
- if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
- ret = v3d_get_extensions(file_priv, args->extensions, &se);
- if (ret) {
- DRM_DEBUG("Failed to get extensions.\n");
- return ret;
- }
- }
-
- ret = v3d_job_init(v3d, file_priv, (void *)&render, sizeof(*render),
- v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER);
- if (ret)
- goto fail;
-
- render->start = args->rcl_start;
- render->end = args->rcl_end;
- INIT_LIST_HEAD(&render->unref_list);
-
- if (args->bcl_start != args->bcl_end) {
- ret = v3d_job_init(v3d, file_priv, (void *)&bin, sizeof(*bin),
- v3d_job_free, args->in_sync_bcl, &se, V3D_BIN);
- if (ret)
- goto fail;
-
- bin->start = args->bcl_start;
- bin->end = args->bcl_end;
- bin->qma = args->qma;
- bin->qms = args->qms;
- bin->qts = args->qts;
- bin->render = render;
- }
-
- if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
- ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job),
- v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
- if (ret)
- goto fail;
-
- last_job = clean_job;
- } else {
- last_job = &render->base;
- }
-
- ret = v3d_lookup_bos(dev, file_priv, last_job,
- args->bo_handles, args->bo_handle_count);
- if (ret)
- goto fail;
-
- ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
- if (ret)
- goto fail;
-
- if (args->perfmon_id) {
- render->base.perfmon = v3d_perfmon_find(v3d_priv,
- args->perfmon_id);
-
- if (!render->base.perfmon) {
- ret = -ENOENT;
- goto fail_perfmon;
- }
- }
-
- mutex_lock(&v3d->sched_lock);
- if (bin) {
- bin->base.perfmon = render->base.perfmon;
- v3d_perfmon_get(bin->base.perfmon);
- v3d_push_job(&bin->base);
-
- ret = drm_sched_job_add_dependency(&render->base.base,
- dma_fence_get(bin->base.done_fence));
- if (ret)
- goto fail_unreserve;
- }
-
- v3d_push_job(&render->base);
-
- if (clean_job) {
- struct dma_fence *render_fence =
- dma_fence_get(render->base.done_fence);
- ret = drm_sched_job_add_dependency(&clean_job->base,
- render_fence);
- if (ret)
- goto fail_unreserve;
- clean_job->perfmon = render->base.perfmon;
- v3d_perfmon_get(clean_job->perfmon);
- v3d_push_job(clean_job);
- }
-
- mutex_unlock(&v3d->sched_lock);
-
- v3d_attach_fences_and_unlock_reservation(file_priv,
- last_job,
- &acquire_ctx,
- args->out_sync,
- &se,
- last_job->done_fence);
-
- if (bin)
- v3d_job_put(&bin->base);
- v3d_job_put(&render->base);
- if (clean_job)
- v3d_job_put(clean_job);
-
- return 0;
-
-fail_unreserve:
- mutex_unlock(&v3d->sched_lock);
-fail_perfmon:
- drm_gem_unlock_reservations(last_job->bo,
- last_job->bo_count, &acquire_ctx);
-fail:
- v3d_job_cleanup((void *)bin);
- v3d_job_cleanup((void *)render);
- v3d_job_cleanup(clean_job);
- v3d_put_multisync_post_deps(&se);
-
- return ret;
-}
-
-/**
- * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
- * @dev: DRM device
- * @data: ioctl argument
- * @file_priv: DRM file for this fd
- *
- * Userspace provides the register setup for the TFU, which we don't
- * need to validate since the TFU is behind the MMU.
- */
-int
-v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct v3d_dev *v3d = to_v3d_dev(dev);
- struct drm_v3d_submit_tfu *args = data;
- struct v3d_submit_ext se = {0};
- struct v3d_tfu_job *job = NULL;
- struct ww_acquire_ctx acquire_ctx;
- int ret = 0;
-
- trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);
-
- if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
- DRM_DEBUG("invalid flags: %d\n", args->flags);
- return -EINVAL;
- }
-
- if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
- ret = v3d_get_extensions(file_priv, args->extensions, &se);
- if (ret) {
- DRM_DEBUG("Failed to get extensions.\n");
- return ret;
- }
- }
-
- ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job),
- v3d_job_free, args->in_sync, &se, V3D_TFU);
- if (ret)
- goto fail;
-
- job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
- sizeof(*job->base.bo), GFP_KERNEL);
- if (!job->base.bo) {
- ret = -ENOMEM;
- goto fail;
- }
-
- job->args = *args;
-
- for (job->base.bo_count = 0;
- job->base.bo_count < ARRAY_SIZE(args->bo_handles);
- job->base.bo_count++) {
- struct drm_gem_object *bo;
-
- if (!args->bo_handles[job->base.bo_count])
- break;
-
- bo = drm_gem_object_lookup(file_priv, args->bo_handles[job->base.bo_count]);
- if (!bo) {
- DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
- job->base.bo_count,
- args->bo_handles[job->base.bo_count]);
- ret = -ENOENT;
- goto fail;
- }
- job->base.bo[job->base.bo_count] = bo;
- }
-
- ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
- if (ret)
- goto fail;
-
- mutex_lock(&v3d->sched_lock);
- v3d_push_job(&job->base);
- mutex_unlock(&v3d->sched_lock);
-
- v3d_attach_fences_and_unlock_reservation(file_priv,
- &job->base, &acquire_ctx,
- args->out_sync,
- &se,
- job->base.done_fence);
-
- v3d_job_put(&job->base);
-
- return 0;
-
-fail:
- v3d_job_cleanup((void *)job);
- v3d_put_multisync_post_deps(&se);
-
- return ret;
-}
-
-/**
- * v3d_submit_csd_ioctl() - Submits a CSD (texture formatting) job to the V3D.
- * @dev: DRM device
- * @data: ioctl argument
- * @file_priv: DRM file for this fd
- *
- * Userspace provides the register setup for the CSD, which we don't
- * need to validate since the CSD is behind the MMU.
- */
-int
-v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct v3d_dev *v3d = to_v3d_dev(dev);
- struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
- struct drm_v3d_submit_csd *args = data;
- struct v3d_submit_ext se = {0};
- struct v3d_csd_job *job = NULL;
- struct v3d_job *clean_job = NULL;
- struct ww_acquire_ctx acquire_ctx;
- int ret;
-
- trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);
-
- if (args->pad)
- return -EINVAL;
-
- if (!v3d_has_csd(v3d)) {
- DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n");
- return -EINVAL;
- }
-
- if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
- DRM_INFO("invalid flags: %d\n", args->flags);
- return -EINVAL;
- }
-
- if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
- ret = v3d_get_extensions(file_priv, args->extensions, &se);
- if (ret) {
- DRM_DEBUG("Failed to get extensions.\n");
- return ret;
- }
- }
-
- ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job),
- v3d_job_free, args->in_sync, &se, V3D_CSD);
- if (ret)
- goto fail;
-
- ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job),
- v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
- if (ret)
- goto fail;
-
- job->args = *args;
-
- ret = v3d_lookup_bos(dev, file_priv, clean_job,
- args->bo_handles, args->bo_handle_count);
- if (ret)
- goto fail;
-
- ret = v3d_lock_bo_reservations(clean_job, &acquire_ctx);
- if (ret)
- goto fail;
-
- if (args->perfmon_id) {
- job->base.perfmon = v3d_perfmon_find(v3d_priv,
- args->perfmon_id);
- if (!job->base.perfmon) {
- ret = -ENOENT;
- goto fail_perfmon;
- }
- }
-
- mutex_lock(&v3d->sched_lock);
- v3d_push_job(&job->base);
-
- ret = drm_sched_job_add_dependency(&clean_job->base,
- dma_fence_get(job->base.done_fence));
- if (ret)
- goto fail_unreserve;
-
- v3d_push_job(clean_job);
- mutex_unlock(&v3d->sched_lock);
-
- v3d_attach_fences_and_unlock_reservation(file_priv,
- clean_job,
- &acquire_ctx,
- args->out_sync,
- &se,
- clean_job->done_fence);
-
- v3d_job_put(&job->base);
- v3d_job_put(clean_job);
-
- return 0;
-
-fail_unreserve:
- mutex_unlock(&v3d->sched_lock);
-fail_perfmon:
- drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
- &acquire_ctx);
-fail:
- v3d_job_cleanup((void *)job);
- v3d_job_cleanup(clean_job);
- v3d_put_multisync_post_deps(&se);
-
- return ret;
-}
-
int
v3d_gem_init(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index fccbea2a5f2e..54015ad765c7 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -21,10 +21,14 @@
#include <linux/sched/clock.h>
#include <linux/kthread.h>
+#include <drm/drm_syncobj.h>
+
#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"
+#define V3D_CSD_CFG012_WG_COUNT_SHIFT 16
+
static struct v3d_job *
to_v3d_job(struct drm_sched_job *sched_job)
{
@@ -55,6 +59,12 @@ to_csd_job(struct drm_sched_job *sched_job)
return container_of(sched_job, struct v3d_csd_job, base.base);
}
+static struct v3d_cpu_job *
+to_cpu_job(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct v3d_cpu_job, base.base);
+}
+
static void
v3d_sched_job_free(struct drm_sched_job *sched_job)
{
@@ -64,6 +74,28 @@ v3d_sched_job_free(struct drm_sched_job *sched_job)
}
static void
+v3d_cpu_job_free(struct drm_sched_job *sched_job)
+{
+ struct v3d_cpu_job *job = to_cpu_job(sched_job);
+ struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
+ struct v3d_performance_query_info *performance_query = &job->performance_query;
+
+ if (timestamp_query->queries) {
+ for (int i = 0; i < timestamp_query->count; i++)
+ drm_syncobj_put(timestamp_query->queries[i].syncobj);
+ kvfree(timestamp_query->queries);
+ }
+
+ if (performance_query->queries) {
+ for (int i = 0; i < performance_query->count; i++)
+ drm_syncobj_put(performance_query->queries[i].syncobj);
+ kvfree(performance_query->queries);
+ }
+
+ v3d_job_cleanup(&job->base);
+}
+
+static void
v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
{
if (job->perfmon != v3d->active_perfmon)
@@ -262,6 +294,275 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
return fence;
}
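+/* Patch an indirect CSD job with the workgroup counts that the GPU wrote
+ * into the indirect dispatch buffer, and rewrite the shader uniforms that
+ * consume those counts.
+ */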
+static void
+v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
+{
+ struct v3d_indirect_csd_info *indirect_csd = &job->indirect_csd;
+ struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
+ struct v3d_bo *indirect = to_v3d_bo(indirect_csd->indirect);
+ struct drm_v3d_submit_csd *args = &indirect_csd->job->args;
+ u32 *wg_counts;
+
+ v3d_get_bo_vaddr(bo);
+ v3d_get_bo_vaddr(indirect);
+
+ wg_counts = (uint32_t *)(bo->vaddr + indirect_csd->offset);
+
+	/* Nothing to dispatch if any workgroup dimension is zero; drop the
+	 * vaddr mappings taken above before bailing out.
+	 */
+	if (wg_counts[0] == 0 || wg_counts[1] == 0 || wg_counts[2] == 0) {
+		v3d_put_bo_vaddr(indirect);
+		v3d_put_bo_vaddr(bo);
+		return;
+	}
+
+ args->cfg[0] = wg_counts[0] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
+ args->cfg[1] = wg_counts[1] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
+ args->cfg[2] = wg_counts[2] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
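+	/* cfg[4] is the total number of batches minus one: each workgroup
+	 * needs DIV_ROUND_UP(wg_size, 16) batches.
+	 */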
+ args->cfg[4] = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
+ (wg_counts[0] * wg_counts[1] * wg_counts[2]) - 1;
+
+ for (int i = 0; i < 3; i++) {
+ /* 0xffffffff indicates that the uniform rewrite is not needed */
+ if (indirect_csd->wg_uniform_offsets[i] != 0xffffffff) {
+ u32 uniform_idx = indirect_csd->wg_uniform_offsets[i];
+ ((uint32_t *)indirect->vaddr)[uniform_idx] = wg_counts[i];
+ }
+ }
+
+ v3d_put_bo_vaddr(indirect);
+ v3d_put_bo_vaddr(bo);
+}
+
+static void
+v3d_timestamp_query(struct v3d_cpu_job *job)
+{
+ struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
+ struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
+ u8 *value_addr;
+
+ v3d_get_bo_vaddr(bo);
+
+ for (int i = 0; i < timestamp_query->count; i++) {
+ value_addr = ((u8 *)bo->vaddr) + timestamp_query->queries[i].offset;
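+		/* Only the first query in the batch carries the sampled time;
+		 * the remaining ones are zeroed and signal availability
+		 * through their syncobjs alone.
+		 */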
+ *((u64 *)value_addr) = i == 0 ? ktime_get_ns() : 0ull;
+
+ drm_syncobj_replace_fence(timestamp_query->queries[i].syncobj,
+ job->base.done_fence);
+ }
+
+ v3d_put_bo_vaddr(bo);
+}
+
+static void
+v3d_reset_timestamp_queries(struct v3d_cpu_job *job)
+{
+ struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
+ struct v3d_timestamp_query *queries = timestamp_query->queries;
+ struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
+ u8 *value_addr;
+
+ v3d_get_bo_vaddr(bo);
+
+ for (int i = 0; i < timestamp_query->count; i++) {
+ value_addr = ((u8 *)bo->vaddr) + queries[i].offset;
+ *((u64 *)value_addr) = 0;
+
+ drm_syncobj_replace_fence(queries[i].syncobj, NULL);
+ }
+
+ v3d_put_bo_vaddr(bo);
+}
+
+static void
+write_to_buffer(void *dst, u32 idx, bool do_64bit, u64 value)
+{
+ if (do_64bit) {
+ u64 *dst64 = (u64 *)dst;
+
+ dst64[idx] = value;
+ } else {
+ u32 *dst32 = (u32 *)dst;
+
+ dst32[idx] = (u32)value;
+ }
+}
+
+static void
+v3d_copy_query_results(struct v3d_cpu_job *job)
+{
+ struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
+ struct v3d_timestamp_query *queries = timestamp_query->queries;
+ struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
+ struct v3d_bo *timestamp = to_v3d_bo(job->base.bo[1]);
+ struct v3d_copy_query_results_info *copy = &job->copy;
+ struct dma_fence *fence;
+ u8 *query_addr;
+ bool available, write_result;
+ u8 *data;
+ int i;
+
+ v3d_get_bo_vaddr(bo);
+ v3d_get_bo_vaddr(timestamp);
+
+ data = ((u8 *)bo->vaddr) + copy->offset;
+
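+	/* A result is copied once its syncobj has signaled; with do_partial
+	 * set, whatever value currently sits in the timestamp BO is copied
+	 * regardless of availability.
+	 */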
+ for (i = 0; i < timestamp_query->count; i++) {
+ fence = drm_syncobj_fence_get(queries[i].syncobj);
+ available = fence ? dma_fence_is_signaled(fence) : false;
+
+ write_result = available || copy->do_partial;
+ if (write_result) {
+ query_addr = ((u8 *)timestamp->vaddr) + queries[i].offset;
+ write_to_buffer(data, 0, copy->do_64bit, *((u64 *)query_addr));
+ }
+
+ if (copy->availability_bit)
+ write_to_buffer(data, 1, copy->do_64bit, available ? 1u : 0u);
+
+ data += copy->stride;
+
+ dma_fence_put(fence);
+ }
+
+ v3d_put_bo_vaddr(timestamp);
+ v3d_put_bo_vaddr(bo);
+}
+
+static void
+v3d_reset_performance_queries(struct v3d_cpu_job *job)
+{
+ struct v3d_performance_query_info *performance_query = &job->performance_query;
+ struct v3d_file_priv *v3d_priv = job->base.file->driver_priv;
+ struct v3d_dev *v3d = job->base.v3d;
+ struct v3d_perfmon *perfmon;
+
+ for (int i = 0; i < performance_query->count; i++) {
+ for (int j = 0; j < performance_query->nperfmons; j++) {
+ perfmon = v3d_perfmon_find(v3d_priv,
+ performance_query->queries[i].kperfmon_ids[j]);
+ if (!perfmon) {
+ DRM_DEBUG("Failed to find perfmon.");
+ continue;
+ }
+
+ v3d_perfmon_stop(v3d, perfmon, false);
+
+ memset(perfmon->values, 0, perfmon->ncounters * sizeof(u64));
+
+ v3d_perfmon_put(perfmon);
+ }
+
+ drm_syncobj_replace_fence(performance_query->queries[i].syncobj, NULL);
+ }
+}
+
+static void
+v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data, u32 query)
+{
+ struct v3d_performance_query_info *performance_query = &job->performance_query;
+ struct v3d_copy_query_results_info *copy = &job->copy;
+ struct v3d_file_priv *v3d_priv = job->base.file->driver_priv;
+ struct v3d_dev *v3d = job->base.v3d;
+ struct v3d_perfmon *perfmon;
+ u64 counter_values[V3D_PERFCNT_NUM];
+
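+	/* Gather the counters of every perfmon attached to this query into
+	 * one flat array, DRM_V3D_MAX_PERF_COUNTERS slots per perfmon.
+	 */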
+ for (int i = 0; i < performance_query->nperfmons; i++) {
+ perfmon = v3d_perfmon_find(v3d_priv,
+ performance_query->queries[query].kperfmon_ids[i]);
+ if (!perfmon) {
+ DRM_DEBUG("Failed to find perfmon.");
+ continue;
+ }
+
+ v3d_perfmon_stop(v3d, perfmon, true);
+
+ memcpy(&counter_values[i * DRM_V3D_MAX_PERF_COUNTERS], perfmon->values,
+ perfmon->ncounters * sizeof(u64));
+
+ v3d_perfmon_put(perfmon);
+ }
+
+ for (int i = 0; i < performance_query->ncounters; i++)
+ write_to_buffer(data, i, copy->do_64bit, counter_values[i]);
+}
+
+static void
+v3d_copy_performance_query(struct v3d_cpu_job *job)
+{
+ struct v3d_performance_query_info *performance_query = &job->performance_query;
+ struct v3d_copy_query_results_info *copy = &job->copy;
+ struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
+ struct dma_fence *fence;
+ bool available, write_result;
+ u8 *data;
+
+ v3d_get_bo_vaddr(bo);
+
+ data = ((u8 *)bo->vaddr) + copy->offset;
+
+ for (int i = 0; i < performance_query->count; i++) {
+ fence = drm_syncobj_fence_get(performance_query->queries[i].syncobj);
+ available = fence ? dma_fence_is_signaled(fence) : false;
+
+ write_result = available || copy->do_partial;
+ if (write_result)
+ v3d_write_performance_query_result(job, data, i);
+
+ if (copy->availability_bit)
+ write_to_buffer(data, performance_query->ncounters,
+ copy->do_64bit, available ? 1u : 0u);
+
+ data += copy->stride;
+
+ dma_fence_put(fence);
+ }
+
+ v3d_put_bo_vaddr(bo);
+}
+
+static const v3d_cpu_job_fn cpu_job_function[] = {
+ [V3D_CPU_JOB_TYPE_INDIRECT_CSD] = v3d_rewrite_csd_job_wg_counts_from_indirect,
+ [V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = v3d_timestamp_query,
+ [V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = v3d_reset_timestamp_queries,
+ [V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = v3d_copy_query_results,
+ [V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY] = v3d_reset_performance_queries,
+ [V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY] = v3d_copy_performance_query,
+};
+
+static struct dma_fence *
+v3d_cpu_job_run(struct drm_sched_job *sched_job)
+{
+ struct v3d_cpu_job *job = to_cpu_job(sched_job);
+ struct v3d_dev *v3d = job->base.v3d;
+ struct v3d_file_priv *file = job->base.file->driver_priv;
+ u64 runtime;
+
+ v3d->cpu_job = job;
+
+ if (job->job_type >= ARRAY_SIZE(cpu_job_function)) {
+ DRM_DEBUG_DRIVER("Unknown CPU job: %d\n", job->job_type);
+ return NULL;
+ }
+
+ file->start_ns[V3D_CPU] = local_clock();
+ v3d->queue[V3D_CPU].start_ns = file->start_ns[V3D_CPU];
+
+ trace_v3d_cpu_job_begin(&v3d->drm, job->job_type);
+
+ cpu_job_function[job->job_type](job);
+
+ trace_v3d_cpu_job_end(&v3d->drm, job->job_type);
+
+ runtime = local_clock() - file->start_ns[V3D_CPU];
+
+ file->enabled_ns[V3D_CPU] += runtime;
+ v3d->queue[V3D_CPU].enabled_ns += runtime;
+
+ file->jobs_sent[V3D_CPU]++;
+ v3d->queue[V3D_CPU].jobs_sent++;
+
+ file->start_ns[V3D_CPU] = 0;
+ v3d->queue[V3D_CPU].start_ns = 0;
+
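+	/* The work already ran to completion on the CPU, so there is no
+	 * hardware fence to hand back to the scheduler.
+	 */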
+ return NULL;
+}
+
static struct dma_fence *
v3d_cache_clean_job_run(struct drm_sched_job *sched_job)
{
@@ -416,6 +717,12 @@ static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = {
.free_job = v3d_sched_job_free
};
+static const struct drm_sched_backend_ops v3d_cpu_sched_ops = {
+ .run_job = v3d_cpu_job_run,
+ .timedout_job = v3d_generic_job_timedout,
+ .free_job = v3d_cpu_job_free
+};
+
int
v3d_sched_init(struct v3d_dev *v3d)
{
@@ -471,6 +778,15 @@ v3d_sched_init(struct v3d_dev *v3d)
goto fail;
}
+ ret = drm_sched_init(&v3d->queue[V3D_CPU].sched,
+ &v3d_cpu_sched_ops, NULL,
+ DRM_SCHED_PRIORITY_COUNT,
+ 1, job_hang_limit,
+ msecs_to_jiffies(hang_limit_ms), NULL,
+ NULL, "v3d_cpu", v3d->drm.dev);
+ if (ret)
+ goto fail;
+
return 0;
fail:
diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c
new file mode 100644
index 000000000000..fcff41dd2315
--- /dev/null
+++ b/drivers/gpu/drm/v3d/v3d_submit.c
@@ -0,0 +1,1320 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2014-2018 Broadcom
+ * Copyright (C) 2023 Raspberry Pi
+ */
+
+#include <drm/drm_syncobj.h>
+
+#include "v3d_drv.h"
+#include "v3d_regs.h"
+#include "v3d_trace.h"
+
+/* Takes the reservation lock on all the BOs being referenced, so that
+ * at queue submit time we can update the reservations.
+ *
+ * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
+ * (all of which are on the render job's unref_list). They're entirely private
+ * to v3d, so we don't attach dma-buf fences to them.
+ */
+static int
+v3d_lock_bo_reservations(struct v3d_job *job,
+ struct ww_acquire_ctx *acquire_ctx)
+{
+ int i, ret;
+
+ ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < job->bo_count; i++) {
+ ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
+ if (ret)
+ goto fail;
+
+ ret = drm_sched_job_add_implicit_dependencies(&job->base,
+ job->bo[i], true);
+ if (ret)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
+ return ret;
+}
+
+/**
+ * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
+ * referenced by the job.
+ * @dev: DRM device
+ * @file_priv: DRM file for this fd
+ * @job: V3D job being set up
+ * @bo_handles: GEM handles
+ * @bo_count: Number of GEM handles passed in
+ *
+ * The command validator needs to reference BOs by their index within
+ * the submitted job's BO list. This does the validation of the job's
+ * BO list and reference counting for the lifetime of the job.
+ *
+ * Note that this function doesn't need to unreference the BOs on
+ * failure, because that will happen at v3d_job_cleanup() time.
+ */
+static int
+v3d_lookup_bos(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct v3d_job *job,
+ u64 bo_handles,
+ u32 bo_count)
+{
+ job->bo_count = bo_count;
+
+ if (!job->bo_count) {
+ /* See comment on bo_index for why we have to check
+ * this.
+ */
+ DRM_DEBUG("Rendering requires BOs\n");
+ return -EINVAL;
+ }
+
+ return drm_gem_objects_lookup(file_priv,
+ (void __user *)(uintptr_t)bo_handles,
+ job->bo_count, &job->bo);
+}
+
+static void
+v3d_job_free(struct kref *ref)
+{
+ struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
+ int i;
+
+ if (job->bo) {
+ for (i = 0; i < job->bo_count; i++)
+ drm_gem_object_put(job->bo[i]);
+ kvfree(job->bo);
+ }
+
+ dma_fence_put(job->irq_fence);
+ dma_fence_put(job->done_fence);
+
+ if (job->perfmon)
+ v3d_perfmon_put(job->perfmon);
+
+ kfree(job);
+}
+
+static void
+v3d_render_job_free(struct kref *ref)
+{
+ struct v3d_render_job *job = container_of(ref, struct v3d_render_job,
+ base.refcount);
+ struct v3d_bo *bo, *save;
+
+ list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) {
+ drm_gem_object_put(&bo->base.base);
+ }
+
+ v3d_job_free(ref);
+}
+
+void v3d_job_cleanup(struct v3d_job *job)
+{
+ if (!job)
+ return;
+
+ drm_sched_job_cleanup(&job->base);
+ v3d_job_put(job);
+}
+
+void v3d_job_put(struct v3d_job *job)
+{
+ if (!job)
+ return;
+
+ kref_put(&job->refcount, job->free);
+}
+
+static int
+v3d_job_allocate(void **container, size_t size)
+{
+ *container = kcalloc(1, size, GFP_KERNEL);
+ if (!*container) {
+ DRM_ERROR("Cannot allocate memory for V3D job.\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int
+v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
+ struct v3d_job *job, void (*free)(struct kref *ref),
+ u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue)
+{
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+ bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
+ int ret, i;
+
+ job->v3d = v3d;
+ job->free = free;
+ job->file = file_priv;
+
+ ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
+ 1, v3d_priv);
+ if (ret)
+ return ret;
+
+ if (has_multisync) {
+ if (se->in_sync_count && se->wait_stage == queue) {
+ struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs);
+
+ for (i = 0; i < se->in_sync_count; i++) {
+ struct drm_v3d_sem in;
+
+ if (copy_from_user(&in, handle++, sizeof(in))) {
+ ret = -EFAULT;
+ DRM_DEBUG("Failed to copy wait dep handle.\n");
+ goto fail_deps;
+ }
+ ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in.handle, 0);
+
+ // TODO: Investigate why this was filtered out for the IOCTL.
+ if (ret && ret != -ENOENT)
+ goto fail_deps;
+ }
+ }
+ } else {
+ ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in_sync, 0);
+
+ // TODO: Investigate why this was filtered out for the IOCTL.
+ if (ret && ret != -ENOENT)
+ goto fail_deps;
+ }
+
+ kref_init(&job->refcount);
+
+ return 0;
+
+fail_deps:
+ drm_sched_job_cleanup(&job->base);
+ return ret;
+}
+
+static void
+v3d_push_job(struct v3d_job *job)
+{
+ drm_sched_job_arm(&job->base);
+
+ job->done_fence = dma_fence_get(&job->base.s_fence->finished);
+
+ /* put by scheduler job completion */
+ kref_get(&job->refcount);
+
+ drm_sched_entity_push_job(&job->base);
+}
+
+static void
+v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
+ struct v3d_job *job,
+ struct ww_acquire_ctx *acquire_ctx,
+ u32 out_sync,
+ struct v3d_submit_ext *se,
+ struct dma_fence *done_fence)
+{
+ struct drm_syncobj *sync_out;
+ bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
+ int i;
+
+ for (i = 0; i < job->bo_count; i++) {
+ /* XXX: Use shared fences for read-only objects. */
+ dma_resv_add_fence(job->bo[i]->resv, job->done_fence,
+ DMA_RESV_USAGE_WRITE);
+ }
+
+ drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
+
+	/* Update the return sync object for the job; without the multisync
+	 * extension there is only a single signal semaphore to replace.
+	 */
+ if (!has_multisync) {
+ sync_out = drm_syncobj_find(file_priv, out_sync);
+ if (sync_out) {
+ drm_syncobj_replace_fence(sync_out, done_fence);
+ drm_syncobj_put(sync_out);
+ }
+ return;
+ }
+
+ /* If multiple semaphores extension is supported */
+ if (se->out_sync_count) {
+ for (i = 0; i < se->out_sync_count; i++) {
+ drm_syncobj_replace_fence(se->out_syncs[i].syncobj,
+ done_fence);
+ drm_syncobj_put(se->out_syncs[i].syncobj);
+ }
+ kvfree(se->out_syncs);
+ }
+}
+
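+/* Allocates and initializes both the CSD job and its cache-clean companion
+ * job, copies the user arguments, looks up the BOs and takes their
+ * reservation locks. Shared by the direct CSD submission path and the
+ * indirect CSD CPU job.
+ */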
+static int
+v3d_setup_csd_jobs_and_bos(struct drm_file *file_priv,
+ struct v3d_dev *v3d,
+ struct drm_v3d_submit_csd *args,
+ struct v3d_csd_job **job,
+ struct v3d_job **clean_job,
+ struct v3d_submit_ext *se,
+ struct ww_acquire_ctx *acquire_ctx)
+{
+ int ret;
+
+ ret = v3d_job_allocate((void *)job, sizeof(**job));
+ if (ret)
+ return ret;
+
+ ret = v3d_job_init(v3d, file_priv, &(*job)->base,
+ v3d_job_free, args->in_sync, se, V3D_CSD);
+ if (ret)
+ return ret;
+
+ ret = v3d_job_allocate((void *)clean_job, sizeof(**clean_job));
+ if (ret)
+ return ret;
+
+ ret = v3d_job_init(v3d, file_priv, *clean_job,
+ v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
+ if (ret)
+ return ret;
+
+ (*job)->args = *args;
+
+ ret = v3d_lookup_bos(&v3d->drm, file_priv, *clean_job,
+ args->bo_handles, args->bo_handle_count);
+ if (ret)
+ return ret;
+
+ return v3d_lock_bo_reservations(*clean_job, acquire_ctx);
+}
+
+static void
+v3d_put_multisync_post_deps(struct v3d_submit_ext *se)
+{
+ unsigned int i;
+
+ if (!(se && se->out_sync_count))
+ return;
+
+ for (i = 0; i < se->out_sync_count; i++)
+ drm_syncobj_put(se->out_syncs[i].syncobj);
+ kvfree(se->out_syncs);
+}
+
+static int
+v3d_get_multisync_post_deps(struct drm_file *file_priv,
+ struct v3d_submit_ext *se,
+ u32 count, u64 handles)
+{
+ struct drm_v3d_sem __user *post_deps;
+ int i, ret;
+
+ if (!count)
+ return 0;
+
+ se->out_syncs = (struct v3d_submit_outsync *)
+ kvmalloc_array(count,
+ sizeof(struct v3d_submit_outsync),
+ GFP_KERNEL);
+ if (!se->out_syncs)
+ return -ENOMEM;
+
+ post_deps = u64_to_user_ptr(handles);
+
+ for (i = 0; i < count; i++) {
+ struct drm_v3d_sem out;
+
+ if (copy_from_user(&out, post_deps++, sizeof(out))) {
+ ret = -EFAULT;
+ DRM_DEBUG("Failed to copy post dep handles\n");
+ goto fail;
+ }
+
+ se->out_syncs[i].syncobj = drm_syncobj_find(file_priv,
+ out.handle);
+ if (!se->out_syncs[i].syncobj) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+ se->out_sync_count = count;
+
+ return 0;
+
+fail:
+ for (i--; i >= 0; i--)
+ drm_syncobj_put(se->out_syncs[i].syncobj);
+ kvfree(se->out_syncs);
+
+ return ret;
+}
+
+/* Get data for multiple binary semaphores synchronization. Parse syncobj
+ * to be signaled when job completes (out_sync).
+ */
+static int
+v3d_get_multisync_submit_deps(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ struct v3d_submit_ext *se)
+{
+ struct drm_v3d_multi_sync multisync;
+ int ret;
+
+ if (se->in_sync_count || se->out_sync_count) {
+ DRM_DEBUG("Two multisync extensions were added to the same job.");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&multisync, ext, sizeof(multisync)))
+ return -EFAULT;
+
+ if (multisync.pad)
+ return -EINVAL;
+
+ ret = v3d_get_multisync_post_deps(file_priv, se, multisync.out_sync_count,
+ multisync.out_syncs);
+ if (ret)
+ return ret;
+
+ se->in_sync_count = multisync.in_sync_count;
+ se->in_syncs = multisync.in_syncs;
+ se->flags |= DRM_V3D_EXT_ID_MULTI_SYNC;
+ se->wait_stage = multisync.wait_stage;
+
+ return 0;
+}
+
+/* Get data for the indirect CSD job submission. */
+static int
+v3d_get_cpu_indirect_csd_params(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ struct v3d_cpu_job *job)
+{
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+ struct v3d_dev *v3d = v3d_priv->v3d;
+ struct drm_v3d_indirect_csd indirect_csd;
+	struct v3d_indirect_csd_info *info;
+
+ if (!job) {
+ DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+ return -EINVAL;
+ }
+
+ if (job->job_type) {
+ DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&indirect_csd, ext, sizeof(indirect_csd)))
+ return -EFAULT;
+
+ if (!v3d_has_csd(v3d)) {
+ DRM_DEBUG("Attempting CSD submit on non-CSD hardware.\n");
+ return -EINVAL;
+ }
+
+	job->job_type = V3D_CPU_JOB_TYPE_INDIRECT_CSD;
+
+	info = &job->indirect_csd;
+	info->offset = indirect_csd.offset;
+ info->wg_size = indirect_csd.wg_size;
+ memcpy(&info->wg_uniform_offsets, &indirect_csd.wg_uniform_offsets,
+ sizeof(indirect_csd.wg_uniform_offsets));
+
+	info->indirect = drm_gem_object_lookup(file_priv, indirect_csd.indirect);
+	if (!info->indirect) {
+		DRM_DEBUG("Failed to look up the indirect CSD BO.\n");
+		return -ENOENT;
+	}
+
+ return v3d_setup_csd_jobs_and_bos(file_priv, v3d, &indirect_csd.submit,
+ &info->job, &info->clean_job,
+ NULL, &info->acquire_ctx);
+}
+
+/* Get data for the query timestamp job submission. */
+static int
+v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ struct v3d_cpu_job *job)
+{
+ u32 __user *offsets, *syncs;
+ struct drm_v3d_timestamp_query timestamp;
+
+ if (!job) {
+ DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+ return -EINVAL;
+ }
+
+ if (job->job_type) {
+ DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&timestamp, ext, sizeof(timestamp)))
+ return -EFAULT;
+
+ if (timestamp.pad)
+ return -EINVAL;
+
+ job->job_type = V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY;
+
+ job->timestamp_query.queries = kvmalloc_array(timestamp.count,
+ sizeof(struct v3d_timestamp_query),
+ GFP_KERNEL);
+ if (!job->timestamp_query.queries)
+ return -ENOMEM;
+
+ offsets = u64_to_user_ptr(timestamp.offsets);
+ syncs = u64_to_user_ptr(timestamp.syncs);
+
+ for (int i = 0; i < timestamp.count; i++) {
+ u32 offset, sync;
+
+ if (copy_from_user(&offset, offsets++, sizeof(offset))) {
+ kvfree(job->timestamp_query.queries);
+ return -EFAULT;
+ }
+
+ job->timestamp_query.queries[i].offset = offset;
+
+ if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+ kvfree(job->timestamp_query.queries);
+ return -EFAULT;
+ }
+
+ job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+ }
+ job->timestamp_query.count = timestamp.count;
+
+ return 0;
+}
+
+static int
+v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ struct v3d_cpu_job *job)
+{
+ u32 __user *syncs;
+ struct drm_v3d_reset_timestamp_query reset;
+
+ if (!job) {
+ DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+ return -EINVAL;
+ }
+
+ if (job->job_type) {
+ DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&reset, ext, sizeof(reset)))
+ return -EFAULT;
+
+ job->job_type = V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY;
+
+ job->timestamp_query.queries = kvmalloc_array(reset.count,
+ sizeof(struct v3d_timestamp_query),
+ GFP_KERNEL);
+ if (!job->timestamp_query.queries)
+ return -ENOMEM;
+
+ syncs = u64_to_user_ptr(reset.syncs);
+
+ for (int i = 0; i < reset.count; i++) {
+ u32 sync;
+
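+		/* Timestamp values are u64s laid out back to back, hence the
+		 * 8-byte stride from the base offset.
+		 */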
+ job->timestamp_query.queries[i].offset = reset.offset + 8 * i;
+
+ if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+ kvfree(job->timestamp_query.queries);
+ return -EFAULT;
+ }
+
+ job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+ }
+ job->timestamp_query.count = reset.count;
+
+ return 0;
+}
+
+/* Get data for the copy timestamp query results job submission. */
+static int
+v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ struct v3d_cpu_job *job)
+{
+ u32 __user *offsets, *syncs;
+ struct drm_v3d_copy_timestamp_query copy;
+ int i;
+
+ if (!job) {
+ DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+ return -EINVAL;
+ }
+
+ if (job->job_type) {
+ DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&copy, ext, sizeof(copy)))
+ return -EFAULT;
+
+ if (copy.pad)
+ return -EINVAL;
+
+ job->job_type = V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY;
+
+ job->timestamp_query.queries = kvmalloc_array(copy.count,
+ sizeof(struct v3d_timestamp_query),
+ GFP_KERNEL);
+ if (!job->timestamp_query.queries)
+ return -ENOMEM;
+
+ offsets = u64_to_user_ptr(copy.offsets);
+ syncs = u64_to_user_ptr(copy.syncs);
+
+ for (i = 0; i < copy.count; i++) {
+ u32 offset, sync;
+
+ if (copy_from_user(&offset, offsets++, sizeof(offset))) {
+ kvfree(job->timestamp_query.queries);
+ return -EFAULT;
+ }
+
+ job->timestamp_query.queries[i].offset = offset;
+
+ if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+ kvfree(job->timestamp_query.queries);
+ return -EFAULT;
+ }
+
+ job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+ }
+ job->timestamp_query.count = copy.count;
+
+ job->copy.do_64bit = copy.do_64bit;
+ job->copy.do_partial = copy.do_partial;
+ job->copy.availability_bit = copy.availability_bit;
+ job->copy.offset = copy.offset;
+ job->copy.stride = copy.stride;
+
+ return 0;
+}
+
+static int
+v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ struct v3d_cpu_job *job)
+{
+ u32 __user *syncs;
+ u64 __user *kperfmon_ids;
+ struct drm_v3d_reset_performance_query reset;
+
+ if (!job) {
+ DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+ return -EINVAL;
+ }
+
+ if (job->job_type) {
+ DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&reset, ext, sizeof(reset)))
+ return -EFAULT;
+
+ job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY;
+
+ job->performance_query.queries = kvmalloc_array(reset.count,
+ sizeof(struct v3d_performance_query),
+ GFP_KERNEL);
+ if (!job->performance_query.queries)
+ return -ENOMEM;
+
+ syncs = u64_to_user_ptr(reset.syncs);
+ kperfmon_ids = u64_to_user_ptr(reset.kperfmon_ids);
+
+ for (int i = 0; i < reset.count; i++) {
+ u32 sync;
+ u64 ids;
+ u32 __user *ids_pointer;
+ u32 id;
+
+ if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+ kvfree(job->performance_query.queries);
+ return -EFAULT;
+ }
+
+ job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+
+ if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
+ kvfree(job->performance_query.queries);
+ return -EFAULT;
+ }
+
+ ids_pointer = u64_to_user_ptr(ids);
+
+ for (int j = 0; j < reset.nperfmons; j++) {
+ if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
+ kvfree(job->performance_query.queries);
+ return -EFAULT;
+ }
+
+ job->performance_query.queries[i].kperfmon_ids[j] = id;
+ }
+ }
+ job->performance_query.count = reset.count;
+ job->performance_query.nperfmons = reset.nperfmons;
+
+ return 0;
+}
+
+static int
+v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ struct v3d_cpu_job *job)
+{
+ u32 __user *syncs;
+ u64 __user *kperfmon_ids;
+ struct drm_v3d_copy_performance_query copy;
+
+ if (!job) {
+ DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+ return -EINVAL;
+ }
+
+ if (job->job_type) {
+ DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&copy, ext, sizeof(copy)))
+ return -EFAULT;
+
+ if (copy.pad)
+ return -EINVAL;
+
+ job->job_type = V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY;
+
+ job->performance_query.queries = kvmalloc_array(copy.count,
+ sizeof(struct v3d_performance_query),
+ GFP_KERNEL);
+ if (!job->performance_query.queries)
+ return -ENOMEM;
+
+ syncs = u64_to_user_ptr(copy.syncs);
+ kperfmon_ids = u64_to_user_ptr(copy.kperfmon_ids);
+
+ for (int i = 0; i < copy.count; i++) {
+ u32 sync;
+ u64 ids;
+ u32 __user *ids_pointer;
+ u32 id;
+
+ if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+ kvfree(job->performance_query.queries);
+ return -EFAULT;
+ }
+
+ job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+
+ if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
+ kvfree(job->performance_query.queries);
+ return -EFAULT;
+ }
+
+ ids_pointer = u64_to_user_ptr(ids);
+
+ for (int j = 0; j < copy.nperfmons; j++) {
+ if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
+ kvfree(job->performance_query.queries);
+ return -EFAULT;
+ }
+
+ job->performance_query.queries[i].kperfmon_ids[j] = id;
+ }
+ }
+ job->performance_query.count = copy.count;
+ job->performance_query.nperfmons = copy.nperfmons;
+ job->performance_query.ncounters = copy.ncounters;
+
+ job->copy.do_64bit = copy.do_64bit;
+ job->copy.do_partial = copy.do_partial;
+ job->copy.availability_bit = copy.availability_bit;
+ job->copy.offset = copy.offset;
+ job->copy.stride = copy.stride;
+
+ return 0;
+}
+
+/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data
+ * according to the extension id (name).
+ */
+static int
+v3d_get_extensions(struct drm_file *file_priv,
+ u64 ext_handles,
+ struct v3d_submit_ext *se,
+ struct v3d_cpu_job *job)
+{
+ struct drm_v3d_extension __user *user_ext;
+ int ret;
+
+ user_ext = u64_to_user_ptr(ext_handles);
+ while (user_ext) {
+ struct drm_v3d_extension ext;
+
+ if (copy_from_user(&ext, user_ext, sizeof(ext))) {
+ DRM_DEBUG("Failed to copy submit extension\n");
+ return -EFAULT;
+ }
+
+ switch (ext.id) {
+ case DRM_V3D_EXT_ID_MULTI_SYNC:
+ ret = v3d_get_multisync_submit_deps(file_priv, user_ext, se);
+ break;
+ case DRM_V3D_EXT_ID_CPU_INDIRECT_CSD:
+ ret = v3d_get_cpu_indirect_csd_params(file_priv, user_ext, job);
+ break;
+ case DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY:
+ ret = v3d_get_cpu_timestamp_query_params(file_priv, user_ext, job);
+ break;
+ case DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY:
+ ret = v3d_get_cpu_reset_timestamp_params(file_priv, user_ext, job);
+ break;
+ case DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY:
+ ret = v3d_get_cpu_copy_query_results_params(file_priv, user_ext, job);
+ break;
+ case DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY:
+ ret = v3d_get_cpu_reset_performance_params(file_priv, user_ext, job);
+ break;
+ case DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY:
+ ret = v3d_get_cpu_copy_performance_query_params(file_priv, user_ext, job);
+ break;
+ default:
+ DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ user_ext = u64_to_user_ptr(ext.next);
+ }
+
+ return 0;
+}
+
+/**
+ * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * This is the main entrypoint for userspace to submit a 3D frame to
+ * the GPU. Userspace provides the binner command list (if
+ * applicable), and the kernel sets up the render command list to draw
+ * to the framebuffer described in the ioctl, using the command lists
+ * that the 3D engine's binner will produce.
+ */
+int
+v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct v3d_dev *v3d = to_v3d_dev(dev);
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+ struct drm_v3d_submit_cl *args = data;
+ struct v3d_submit_ext se = {0};
+ struct v3d_bin_job *bin = NULL;
+ struct v3d_render_job *render = NULL;
+ struct v3d_job *clean_job = NULL;
+ struct v3d_job *last_job;
+ struct ww_acquire_ctx acquire_ctx;
+ int ret = 0;
+
+ trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
+
+ if (args->pad)
+ return -EINVAL;
+
+ if (args->flags &&
+ args->flags & ~(DRM_V3D_SUBMIT_CL_FLUSH_CACHE |
+ DRM_V3D_SUBMIT_EXTENSION)) {
+ DRM_INFO("invalid flags: %d\n", args->flags);
+ return -EINVAL;
+ }
+
+ if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+ ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
+ if (ret) {
+ DRM_DEBUG("Failed to get extensions.\n");
+ return ret;
+ }
+ }
+
+ ret = v3d_job_allocate((void *)&render, sizeof(*render));
+ if (ret)
+ return ret;
+
+ ret = v3d_job_init(v3d, file_priv, &render->base,
+ v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER);
+ if (ret)
+ goto fail;
+
+ render->start = args->rcl_start;
+ render->end = args->rcl_end;
+ INIT_LIST_HEAD(&render->unref_list);
+
+ if (args->bcl_start != args->bcl_end) {
+ ret = v3d_job_allocate((void *)&bin, sizeof(*bin));
+ if (ret)
+ goto fail;
+
+ ret = v3d_job_init(v3d, file_priv, &bin->base,
+ v3d_job_free, args->in_sync_bcl, &se, V3D_BIN);
+ if (ret)
+ goto fail;
+
+ bin->start = args->bcl_start;
+ bin->end = args->bcl_end;
+ bin->qma = args->qma;
+ bin->qms = args->qms;
+ bin->qts = args->qts;
+ bin->render = render;
+ }
+
+ if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
+ ret = v3d_job_allocate((void *)&clean_job, sizeof(*clean_job));
+ if (ret)
+ goto fail;
+
+ ret = v3d_job_init(v3d, file_priv, clean_job,
+ v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
+ if (ret)
+ goto fail;
+
+ last_job = clean_job;
+ } else {
+ last_job = &render->base;
+ }
+
+ ret = v3d_lookup_bos(dev, file_priv, last_job,
+ args->bo_handles, args->bo_handle_count);
+ if (ret)
+ goto fail;
+
+ ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
+ if (ret)
+ goto fail;
+
+ if (args->perfmon_id) {
+ render->base.perfmon = v3d_perfmon_find(v3d_priv,
+ args->perfmon_id);
+
+ if (!render->base.perfmon) {
+ ret = -ENOENT;
+ goto fail_perfmon;
+ }
+ }
+
+ mutex_lock(&v3d->sched_lock);
+ if (bin) {
+ bin->base.perfmon = render->base.perfmon;
+ v3d_perfmon_get(bin->base.perfmon);
+ v3d_push_job(&bin->base);
+
+ ret = drm_sched_job_add_dependency(&render->base.base,
+ dma_fence_get(bin->base.done_fence));
+ if (ret)
+ goto fail_unreserve;
+ }
+
+ v3d_push_job(&render->base);
+
+ if (clean_job) {
+ struct dma_fence *render_fence =
+ dma_fence_get(render->base.done_fence);
+ ret = drm_sched_job_add_dependency(&clean_job->base,
+ render_fence);
+ if (ret)
+ goto fail_unreserve;
+ clean_job->perfmon = render->base.perfmon;
+ v3d_perfmon_get(clean_job->perfmon);
+ v3d_push_job(clean_job);
+ }
+
+ mutex_unlock(&v3d->sched_lock);
+
+ v3d_attach_fences_and_unlock_reservation(file_priv,
+ last_job,
+ &acquire_ctx,
+ args->out_sync,
+ &se,
+ last_job->done_fence);
+
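+	/* When no binner job was submitted, bin is NULL and &bin->base
+	 * evaluates to NULL as well (base is the first member), which
+	 * v3d_job_put() accepts.
+	 */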
+ v3d_job_put(&bin->base);
+ v3d_job_put(&render->base);
+ v3d_job_put(clean_job);
+
+ return 0;
+
+fail_unreserve:
+ mutex_unlock(&v3d->sched_lock);
+fail_perfmon:
+ drm_gem_unlock_reservations(last_job->bo,
+ last_job->bo_count, &acquire_ctx);
+fail:
+ v3d_job_cleanup((void *)bin);
+ v3d_job_cleanup((void *)render);
+ v3d_job_cleanup(clean_job);
+ v3d_put_multisync_post_deps(&se);
+
+ return ret;
+}
+
+/**
+ * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * Userspace provides the register setup for the TFU, which we don't
+ * need to validate since the TFU is behind the MMU.
+ */
+int
+v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct v3d_dev *v3d = to_v3d_dev(dev);
+ struct drm_v3d_submit_tfu *args = data;
+ struct v3d_submit_ext se = {0};
+ struct v3d_tfu_job *job = NULL;
+ struct ww_acquire_ctx acquire_ctx;
+ int ret = 0;
+
+ trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);
+
+ if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
+ DRM_DEBUG("invalid flags: %d\n", args->flags);
+ return -EINVAL;
+ }
+
+ if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+ ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
+ if (ret) {
+ DRM_DEBUG("Failed to get extensions.\n");
+ return ret;
+ }
+ }
+
+ ret = v3d_job_allocate((void *)&job, sizeof(*job));
+ if (ret)
+ return ret;
+
+ ret = v3d_job_init(v3d, file_priv, &job->base,
+ v3d_job_free, args->in_sync, &se, V3D_TFU);
+ if (ret)
+ goto fail;
+
+ job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
+ sizeof(*job->base.bo), GFP_KERNEL);
+ if (!job->base.bo) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ job->args = *args;
+
+ for (job->base.bo_count = 0;
+ job->base.bo_count < ARRAY_SIZE(args->bo_handles);
+ job->base.bo_count++) {
+ struct drm_gem_object *bo;
+
+ if (!args->bo_handles[job->base.bo_count])
+ break;
+
+ bo = drm_gem_object_lookup(file_priv, args->bo_handles[job->base.bo_count]);
+ if (!bo) {
+ DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
+ job->base.bo_count,
+ args->bo_handles[job->base.bo_count]);
+ ret = -ENOENT;
+ goto fail;
+ }
+ job->base.bo[job->base.bo_count] = bo;
+ }
+
+ ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
+ if (ret)
+ goto fail;
+
+ mutex_lock(&v3d->sched_lock);
+ v3d_push_job(&job->base);
+ mutex_unlock(&v3d->sched_lock);
+
+ v3d_attach_fences_and_unlock_reservation(file_priv,
+ &job->base, &acquire_ctx,
+ args->out_sync,
+ &se,
+ job->base.done_fence);
+
+ v3d_job_put(&job->base);
+
+ return 0;
+
+fail:
+ v3d_job_cleanup((void *)job);
+ v3d_put_multisync_post_deps(&se);
+
+ return ret;
+}
+
+/**
+ * v3d_submit_csd_ioctl() - Submits a CSD (compute shader) job to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * Userspace provides the register setup for the CSD, which we don't
+ * need to validate since the CSD is behind the MMU.
+ */
+int
+v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct v3d_dev *v3d = to_v3d_dev(dev);
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+ struct drm_v3d_submit_csd *args = data;
+ struct v3d_submit_ext se = {0};
+ struct v3d_csd_job *job = NULL;
+ struct v3d_job *clean_job = NULL;
+ struct ww_acquire_ctx acquire_ctx;
+ int ret;
+
+ trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);
+
+ if (args->pad)
+ return -EINVAL;
+
+ if (!v3d_has_csd(v3d)) {
+ DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n");
+ return -EINVAL;
+ }
+
+ if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
+ DRM_INFO("invalid flags: %d\n", args->flags);
+ return -EINVAL;
+ }
+
+ if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+ ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
+ if (ret) {
+ DRM_DEBUG("Failed to get extensions.\n");
+ return ret;
+ }
+ }
+
+ ret = v3d_setup_csd_jobs_and_bos(file_priv, v3d, args,
+ &job, &clean_job, &se,
+ &acquire_ctx);
+ if (ret)
+ goto fail;
+
+ if (args->perfmon_id) {
+ job->base.perfmon = v3d_perfmon_find(v3d_priv,
+ args->perfmon_id);
+ if (!job->base.perfmon) {
+ ret = -ENOENT;
+ goto fail_perfmon;
+ }
+ }
+
+ mutex_lock(&v3d->sched_lock);
+ v3d_push_job(&job->base);
+
+ ret = drm_sched_job_add_dependency(&clean_job->base,
+ dma_fence_get(job->base.done_fence));
+ if (ret)
+ goto fail_unreserve;
+
+ v3d_push_job(clean_job);
+ mutex_unlock(&v3d->sched_lock);
+
+ v3d_attach_fences_and_unlock_reservation(file_priv,
+ clean_job,
+ &acquire_ctx,
+ args->out_sync,
+ &se,
+ clean_job->done_fence);
+
+ v3d_job_put(&job->base);
+ v3d_job_put(clean_job);
+
+ return 0;
+
+fail_unreserve:
+ mutex_unlock(&v3d->sched_lock);
+fail_perfmon:
+ drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
+ &acquire_ctx);
+fail:
+ v3d_job_cleanup((void *)job);
+ v3d_job_cleanup(clean_job);
+ v3d_put_multisync_post_deps(&se);
+
+ return ret;
+}
+
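+/* Number of BO handles each CPU job type must be submitted with; anything
+ * else is rejected in v3d_submit_cpu_ioctl().
+ */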
+static const unsigned int cpu_job_bo_handle_count[] = {
+ [V3D_CPU_JOB_TYPE_INDIRECT_CSD] = 1,
+ [V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = 1,
+ [V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = 1,
+ [V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = 2,
+ [V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY] = 0,
+ [V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY] = 1,
+};
+
+/**
+ * v3d_submit_cpu_ioctl() - Submits a CPU job to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * Userspace specifies the CPU job type and data required to perform its
+ * operations through the drm_v3d_extension struct.
+ */
+int
+v3d_submit_cpu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct v3d_dev *v3d = to_v3d_dev(dev);
+ struct drm_v3d_submit_cpu *args = data;
+ struct v3d_submit_ext se = {0};
+ struct v3d_submit_ext *out_se = NULL;
+ struct v3d_cpu_job *cpu_job = NULL;
+ struct v3d_csd_job *csd_job = NULL;
+ struct v3d_job *clean_job = NULL;
+ struct ww_acquire_ctx acquire_ctx;
+ int ret;
+
+ if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
+ DRM_INFO("Invalid flags: %d\n", args->flags);
+ return -EINVAL;
+ }
+
+ ret = v3d_job_allocate((void *)&cpu_job, sizeof(*cpu_job));
+ if (ret)
+ return ret;
+
+ if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+ ret = v3d_get_extensions(file_priv, args->extensions, &se, cpu_job);
+ if (ret) {
+ DRM_DEBUG("Failed to get extensions.\n");
+ goto fail;
+ }
+ }
+
+ /* Every CPU job must have a CPU job user extension */
+ if (!cpu_job->job_type) {
+ DRM_DEBUG("CPU job must have a CPU job user extension.\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (args->bo_handle_count != cpu_job_bo_handle_count[cpu_job->job_type]) {
+ DRM_DEBUG("This CPU job was not submitted with the proper number of BOs.\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ trace_v3d_submit_cpu_ioctl(&v3d->drm, cpu_job->job_type);
+
+ ret = v3d_job_init(v3d, file_priv, &cpu_job->base,
+ v3d_job_free, 0, &se, V3D_CPU);
+ if (ret)
+ goto fail;
+
+ clean_job = cpu_job->indirect_csd.clean_job;
+ csd_job = cpu_job->indirect_csd.job;
+
+ if (args->bo_handle_count) {
+ ret = v3d_lookup_bos(dev, file_priv, &cpu_job->base,
+ args->bo_handles, args->bo_handle_count);
+ if (ret)
+ goto fail;
+
+ ret = v3d_lock_bo_reservations(&cpu_job->base, &acquire_ctx);
+ if (ret)
+ goto fail;
+ }
+
+ mutex_lock(&v3d->sched_lock);
+ v3d_push_job(&cpu_job->base);
+
+ switch (cpu_job->job_type) {
+ case V3D_CPU_JOB_TYPE_INDIRECT_CSD:
+ ret = drm_sched_job_add_dependency(&csd_job->base.base,
+ dma_fence_get(cpu_job->base.done_fence));
+ if (ret)
+ goto fail_unreserve;
+
+ v3d_push_job(&csd_job->base);
+
+ ret = drm_sched_job_add_dependency(&clean_job->base,
+ dma_fence_get(csd_job->base.done_fence));
+ if (ret)
+ goto fail_unreserve;
+
+ v3d_push_job(clean_job);
+
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&v3d->sched_lock);
+
+ out_se = (cpu_job->job_type == V3D_CPU_JOB_TYPE_INDIRECT_CSD) ? NULL : &se;
+
+ v3d_attach_fences_and_unlock_reservation(file_priv,
+ &cpu_job->base,
+ &acquire_ctx, 0,
+ out_se, cpu_job->base.done_fence);
+
+ switch (cpu_job->job_type) {
+ case V3D_CPU_JOB_TYPE_INDIRECT_CSD:
+ v3d_attach_fences_and_unlock_reservation(file_priv,
+ clean_job,
+ &cpu_job->indirect_csd.acquire_ctx,
+ 0, &se, clean_job->done_fence);
+ break;
+ default:
+ break;
+ }
+
+ v3d_job_put(&cpu_job->base);
+ v3d_job_put(&csd_job->base);
+ v3d_job_put(clean_job);
+
+ return 0;
+
+fail_unreserve:
+ mutex_unlock(&v3d->sched_lock);
+
+ drm_gem_unlock_reservations(cpu_job->base.bo, cpu_job->base.bo_count,
+ &acquire_ctx);
+
+ drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
+ &cpu_job->indirect_csd.acquire_ctx);
+
+fail:
+ v3d_job_cleanup((void *)cpu_job);
+ v3d_job_cleanup((void *)csd_job);
+ v3d_job_cleanup(clean_job);
+ v3d_put_multisync_post_deps(&se);
+ kvfree(cpu_job->timestamp_query.queries);
+ kvfree(cpu_job->performance_query.queries);
+
+ return ret;
+}
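For V3D_CPU_JOB_TYPE_INDIRECT_CSD the switch above wires a three-stage pipeline; sketched as a comment, the ordering is:

/*
 * cpu job --done_fence--> csd job --done_fence--> clean job
 *
 * Each arrow is a drm_sched_job_add_dependency() call on the
 * producer's done_fence, so the scheduler starts a stage only after
 * the previous one has signalled.
 */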
diff --git a/drivers/gpu/drm/v3d/v3d_trace.h b/drivers/gpu/drm/v3d/v3d_trace.h
index 7aa8dc356e54..5917b94148f5 100644
--- a/drivers/gpu/drm/v3d/v3d_trace.h
+++ b/drivers/gpu/drm/v3d/v3d_trace.h
@@ -225,6 +225,63 @@ TRACE_EVENT(v3d_submit_csd,
__entry->seqno)
);
+TRACE_EVENT(v3d_submit_cpu_ioctl,
+ TP_PROTO(struct drm_device *dev, enum v3d_cpu_job_type job_type),
+ TP_ARGS(dev, job_type),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(enum v3d_cpu_job_type, job_type)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->job_type = job_type;
+ ),
+
+ TP_printk("dev=%u, job_type=%d",
+ __entry->dev,
+ __entry->job_type)
+);
+
+TRACE_EVENT(v3d_cpu_job_begin,
+ TP_PROTO(struct drm_device *dev, enum v3d_cpu_job_type job_type),
+ TP_ARGS(dev, job_type),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(enum v3d_cpu_job_type, job_type)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->job_type = job_type;
+ ),
+
+ TP_printk("dev=%u, job_type=%d",
+ __entry->dev,
+ __entry->job_type)
+);
+
+TRACE_EVENT(v3d_cpu_job_end,
+ TP_PROTO(struct drm_device *dev, enum v3d_cpu_job_type job_type),
+ TP_ARGS(dev, job_type),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(enum v3d_cpu_job_type, job_type)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->job_type = job_type;
+ ),
+
+ TP_printk("dev=%u, job_type=%d",
+ __entry->dev,
+ __entry->job_type)
+);
+
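The three events added above share one layout and differ only by name; a sketch of how they could be collapsed with the tracepoint event-class macros (not what this patch does):

DECLARE_EVENT_CLASS(v3d_cpu_job_template,
	TP_PROTO(struct drm_device *dev, enum v3d_cpu_job_type job_type),
	TP_ARGS(dev, job_type),
	TP_STRUCT__entry(
		__field(u32, dev)
		__field(enum v3d_cpu_job_type, job_type)
	),
	TP_fast_assign(
		__entry->dev = dev->primary->index;
		__entry->job_type = job_type;
	),
	TP_printk("dev=%u, job_type=%d", __entry->dev, __entry->job_type)
);

DEFINE_EVENT(v3d_cpu_job_template, v3d_cpu_job_begin,
	TP_PROTO(struct drm_device *dev, enum v3d_cpu_job_type job_type),
	TP_ARGS(dev, job_type)
);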
TRACE_EVENT(v3d_cache_clean_begin,
TP_PROTO(struct drm_device *dev),
TP_ARGS(dev),
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index 047b95812334..cd9e66a06596 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -182,7 +182,7 @@ DEFINE_DRM_GEM_FOPS(vbox_fops);
static const struct drm_driver driver = {
.driver_features =
- DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+ DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC | DRIVER_CURSOR_HOTSPOT,
.fops = &vbox_fops,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index 341edd982cb3..9ff3bade9795 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -429,8 +429,8 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane,
flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE |
VBOX_MOUSE_POINTER_ALPHA;
hgsmi_update_pointer_shape(vbox->guest_pool, flags,
- min_t(u32, max(fb->hot_x, 0), width),
- min_t(u32, max(fb->hot_y, 0), height),
+ min_t(u32, max(new_state->hotspot_x, 0), width),
+ min_t(u32, max(new_state->hotspot_y, 0), height),
width, height, vbox->cursor_data, data_size);
mutex_unlock(&vbox->hw_mutex);
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 4334c7608408..f8e9abe647b9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -177,7 +177,7 @@ static const struct drm_driver driver = {
* out via drm_device::driver_features:
*/
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC |
- DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
+ DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE | DRIVER_CURSOR_HOTSPOT,
.open = virtio_gpu_driver_open,
.postclose = virtio_gpu_driver_postclose,
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a2e045f3a000..a72a2dbda031 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -79,6 +79,8 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
+ plane);
bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
struct drm_crtc_state *crtc_state;
int ret;
@@ -86,6 +88,14 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
return 0;
+ /*
+ * Ignore damage clips if the framebuffer attached to the plane's state
+ * has changed since the last plane update (page-flip). In this case, a
+ * full plane update should happen because uploads are done per-buffer.
+ */
+ if (old_plane_state->fb != new_plane_state->fb)
+ new_plane_state->ignore_damage_clips = true;
+
crtc_state = drm_atomic_get_crtc_state(state,
new_plane_state->crtc);
if (IS_ERR(crtc_state))
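Setting ignore_damage_clips on a framebuffer swap matters because helper-based drivers upload only the damaged rectangles; with the flag set, the damage iterator hands back a single full-plane rectangle instead. A sketch of the consuming side (upload_rect() is hypothetical):

struct drm_atomic_helper_damage_iter iter;
struct drm_rect clip;

drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
drm_atomic_for_each_plane_damage(&iter, &clip)
	upload_rect(&clip);	/* one full-plane rect when clips are ignored */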
@@ -323,16 +333,16 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
plane->state->crtc_x,
plane->state->crtc_y,
- plane->state->fb ? plane->state->fb->hot_x : 0,
- plane->state->fb ? plane->state->fb->hot_y : 0);
+ plane->state->hotspot_x,
+ plane->state->hotspot_y);
output->cursor.hdr.type =
cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
output->cursor.resource_id = cpu_to_le32(handle);
if (plane->state->fb) {
output->cursor.hot_x =
- cpu_to_le32(plane->state->fb->hot_x);
+ cpu_to_le32(plane->state->hotspot_x);
output->cursor.hot_y =
- cpu_to_le32(plane->state->fb->hot_y);
+ cpu_to_le32(plane->state->hotspot_y);
} else {
output->cursor.hot_x = cpu_to_le32(0);
output->cursor.hot_y = cpu_to_le32(0);
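With DRIVER_CURSOR_HOTSPOT advertised, the atomic core stores the userspace-supplied hotspot in the plane state, so drivers read it there instead of reaching into the framebuffer, roughly:

/* Sketch: cursor-plane update after the conversion. */
s32 hot_x = plane->state->hotspot_x;	/* was plane->state->fb->hot_x */
s32 hot_y = plane->state->hotspot_y;	/* was plane->state->fb->hot_y */
/* ... program the hardware cursor with hot_x/hot_y ... */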
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 8b24ecf60e3e..d3e308fdfd5b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1611,7 +1611,7 @@ static const struct file_operations vmwgfx_driver_fops = {
static const struct drm_driver driver = {
.driver_features =
- DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM,
+ DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM | DRIVER_CURSOR_HOTSPOT,
.ioctls = vmw_ioctls,
.num_ioctls = ARRAY_SIZE(vmw_ioctls),
.master_set = vmw_master_set,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 818b7f109f53..5fd0ccaa0b41 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -768,13 +768,8 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
s32 hotspot_x, hotspot_y;
- hotspot_x = du->hotspot_x;
- hotspot_y = du->hotspot_y;
-
- if (new_state->fb) {
- hotspot_x += new_state->fb->hot_x;
- hotspot_y += new_state->fb->hot_y;
- }
+ hotspot_x = du->hotspot_x + new_state->hotspot_x;
+ hotspot_y = du->hotspot_y + new_state->hotspot_y;
du->cursor_surface = vps->surf;
du->cursor_bo = vps->bo;
@@ -837,10 +832,21 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct drm_crtc_state *crtc_state = NULL;
struct drm_framebuffer *new_fb = new_state->fb;
+ struct drm_framebuffer *old_fb = old_state->fb;
int ret;
+ /*
+ * Ignore damage clips if the framebuffer attached to the plane's state
+ * has changed since the last plane update (page-flip). In this case, a
+ * full plane update should happen because uploads are done per-buffer.
+ */
+ if (old_fb != new_fb)
+ new_state->ignore_damage_clips = true;
+
if (new_state->crtc)
crtc_state = drm_atomic_get_new_crtc_state(state,
new_state->crtc);
diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c
index a7f8611be6f4..db3bb4afbfc4 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_kms.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c
@@ -27,7 +27,6 @@
#include <drm/drm_managed.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_plane.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/greybus/Kconfig b/drivers/greybus/Kconfig
index 033d31dbf3b8..ab81ceceb337 100644
--- a/drivers/greybus/Kconfig
+++ b/drivers/greybus/Kconfig
@@ -20,6 +20,7 @@ if GREYBUS
config GREYBUS_BEAGLEPLAY
tristate "Greybus BeaglePlay driver"
depends on SERIAL_DEV_BUS
+ select CRC_CCITT
help
Select this option if you have a BeaglePlay where CC1352
co-processor acts as Greybus SVC.
diff --git a/drivers/hid/hid-picolcd_fb.c b/drivers/hid/hid-picolcd_fb.c
index a4dccdcda26f..d7dddd99d325 100644
--- a/drivers/hid/hid-picolcd_fb.c
+++ b/drivers/hid/hid-picolcd_fb.c
@@ -505,6 +505,7 @@ int picolcd_init_framebuffer(struct picolcd_data *data)
dev_err(dev, "can't get a free page for framebuffer\n");
goto err_nomem;
}
+ info->flags |= FBINFO_VIRTFB;
info->screen_buffer = fbdata->bitmap;
info->fix.smem_start = (unsigned long)fbdata->bitmap;
memset(fbdata->vbitmap, 0xff, PICOLCDFB_SIZE);
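FBINFO_VIRTFB tells the fbdev core that the screen buffer is ordinary kernel memory rather than ioremapped MMIO, which selects the plain-memory access path, roughly:

/* Simplified view of the core's read path: */
if (info->flags & FBINFO_VIRTFB)
	memcpy(dst, info->screen_buffer + pos, count);	    /* system RAM */
else
	memcpy_fromio(dst, info->screen_base + pos, count); /* MMIO */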
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 8db740214ffd..703666b95bf4 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -31,6 +31,7 @@
#define POWER_METER_CAN_NOTIFY (1 << 3)
#define POWER_METER_IS_BATTERY (1 << 8)
#define UNKNOWN_HYSTERESIS 0xFFFFFFFF
+#define UNKNOWN_POWER 0xFFFFFFFF
#define METER_NOTIFY_CONFIG 0x80
#define METER_NOTIFY_TRIP 0x81
@@ -348,6 +349,9 @@ static ssize_t show_power(struct device *dev,
update_meter(resource);
mutex_unlock(&resource->lock);
+ if (resource->power == UNKNOWN_POWER)
+ return -ENODATA;
+
return sprintf(buf, "%llu\n", resource->power * 1000);
}
diff --git a/drivers/hwmon/corsair-psu.c b/drivers/hwmon/corsair-psu.c
index 904890598c11..2c7c92272fe3 100644
--- a/drivers/hwmon/corsair-psu.c
+++ b/drivers/hwmon/corsair-psu.c
@@ -899,7 +899,23 @@ static struct hid_driver corsairpsu_driver = {
.reset_resume = corsairpsu_resume,
#endif
};
-module_hid_driver(corsairpsu_driver);
+
+static int __init corsair_init(void)
+{
+ return hid_register_driver(&corsairpsu_driver);
+}
+
+static void __exit corsair_exit(void)
+{
+ hid_unregister_driver(&corsairpsu_driver);
+}
+
+/*
+ * With module_init() the driver would load before the HID bus when
+ * built-in, so use late_initcall() instead.
+ */
+late_initcall(corsair_init);
+module_exit(corsair_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Wilken Gottwalt <[email protected]>");
diff --git a/drivers/hwmon/ltc2991.c b/drivers/hwmon/ltc2991.c
index bd63c61129a9..fc53fdcb2b6c 100644
--- a/drivers/hwmon/ltc2991.c
+++ b/drivers/hwmon/ltc2991.c
@@ -373,7 +373,7 @@ static int ltc2991_init(struct ltc2991_state *st)
LTC2991_REPEAT_ACQ_EN);
if (ret)
return dev_err_probe(st->dev, ret,
- "Error: Failed to set contiuous mode.\n");
+ "Error: Failed to set continuous mode.\n");
/* Enable all channels and trigger conversions */
return regmap_write(st->regmap, LTC2991_CH_EN_TRIGGER,
diff --git a/drivers/hwmon/max31827.c b/drivers/hwmon/max31827.c
index fd1fed1a797c..a1ce65145669 100644
--- a/drivers/hwmon/max31827.c
+++ b/drivers/hwmon/max31827.c
@@ -12,6 +12,7 @@
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#define MAX31827_T_REG 0x0
#define MAX31827_CONFIGURATION_REG 0x2
diff --git a/drivers/hwmon/nzxt-kraken2.c b/drivers/hwmon/nzxt-kraken2.c
index 428c77b5fce5..7caf387eb144 100644
--- a/drivers/hwmon/nzxt-kraken2.c
+++ b/drivers/hwmon/nzxt-kraken2.c
@@ -161,13 +161,13 @@ static int kraken2_probe(struct hid_device *hdev,
ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
if (ret) {
hid_err(hdev, "hid hw start failed with %d\n", ret);
- goto fail_and_stop;
+ return ret;
}
ret = hid_hw_open(hdev);
if (ret) {
hid_err(hdev, "hid hw open failed with %d\n", ret);
- goto fail_and_close;
+ goto fail_and_stop;
}
priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, "kraken2",
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 5ca6278baff4..89e8ed214ea4 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -493,7 +493,7 @@ static void etm_event_start(struct perf_event *event, int flags)
goto fail_end_stop;
/* Finally enable the tracer */
- if (coresight_enable_source(csdev, CS_MODE_PERF, event))
+ if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
goto fail_disable_path;
/*
@@ -587,7 +587,7 @@ static void etm_event_stop(struct perf_event *event, int mode)
return;
/* stop tracer */
- coresight_disable_source(csdev, event);
+ source_ops(csdev)->disable(csdev, event);
/* tell the core */
event->hw.state = PERF_HES_STOPPED;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 77b0271ce6eb..34aee59dd147 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -2224,7 +2224,7 @@ static void clear_etmdrvdata(void *info)
per_cpu(delayed_probe, cpu) = NULL;
}
-static void __exit etm4_remove_dev(struct etmv4_drvdata *drvdata)
+static void etm4_remove_dev(struct etmv4_drvdata *drvdata)
{
bool had_delayed_probe;
/*
@@ -2253,7 +2253,7 @@ static void __exit etm4_remove_dev(struct etmv4_drvdata *drvdata)
}
}
-static void __exit etm4_remove_amba(struct amba_device *adev)
+static void etm4_remove_amba(struct amba_device *adev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(&adev->dev);
@@ -2261,7 +2261,7 @@ static void __exit etm4_remove_amba(struct amba_device *adev)
etm4_remove_dev(drvdata);
}
-static int __exit etm4_remove_platform_dev(struct platform_device *pdev)
+static int etm4_remove_platform_dev(struct platform_device *pdev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(&pdev->dev);
diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.c b/drivers/hwtracing/coresight/ultrasoc-smb.c
index e9a32a97fbee..6e32d31a95fe 100644
--- a/drivers/hwtracing/coresight/ultrasoc-smb.c
+++ b/drivers/hwtracing/coresight/ultrasoc-smb.c
@@ -99,7 +99,7 @@ static int smb_open(struct inode *inode, struct file *file)
struct smb_drv_data, miscdev);
int ret = 0;
- mutex_lock(&drvdata->mutex);
+ spin_lock(&drvdata->spinlock);
if (drvdata->reading) {
ret = -EBUSY;
@@ -115,7 +115,7 @@ static int smb_open(struct inode *inode, struct file *file)
drvdata->reading = true;
out:
- mutex_unlock(&drvdata->mutex);
+ spin_unlock(&drvdata->spinlock);
return ret;
}
@@ -132,10 +132,8 @@ static ssize_t smb_read(struct file *file, char __user *data, size_t len,
if (!len)
return 0;
- mutex_lock(&drvdata->mutex);
-
if (!sdb->data_size)
- goto out;
+ return 0;
to_copy = min(sdb->data_size, len);
@@ -145,20 +143,15 @@ static ssize_t smb_read(struct file *file, char __user *data, size_t len,
if (copy_to_user(data, sdb->buf_base + sdb->buf_rdptr, to_copy)) {
dev_dbg(dev, "Failed to copy data to user\n");
- to_copy = -EFAULT;
- goto out;
+ return -EFAULT;
}
*ppos += to_copy;
-
smb_update_read_ptr(drvdata, to_copy);
-
- dev_dbg(dev, "%zu bytes copied\n", to_copy);
-out:
if (!sdb->data_size)
smb_reset_buffer(drvdata);
- mutex_unlock(&drvdata->mutex);
+ dev_dbg(dev, "%zu bytes copied\n", to_copy);
return to_copy;
}
@@ -167,9 +160,9 @@ static int smb_release(struct inode *inode, struct file *file)
struct smb_drv_data *drvdata = container_of(file->private_data,
struct smb_drv_data, miscdev);
- mutex_lock(&drvdata->mutex);
+ spin_lock(&drvdata->spinlock);
drvdata->reading = false;
- mutex_unlock(&drvdata->mutex);
+ spin_unlock(&drvdata->spinlock);
return 0;
}
@@ -262,7 +255,7 @@ static int smb_enable(struct coresight_device *csdev, enum cs_mode mode,
struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
int ret = 0;
- mutex_lock(&drvdata->mutex);
+ spin_lock(&drvdata->spinlock);
/* Do nothing, the trace data is being read by another interface now */
if (drvdata->reading) {
@@ -294,7 +287,7 @@ static int smb_enable(struct coresight_device *csdev, enum cs_mode mode,
dev_dbg(&csdev->dev, "Ultrasoc SMB enabled\n");
out:
- mutex_unlock(&drvdata->mutex);
+ spin_unlock(&drvdata->spinlock);
return ret;
}
@@ -304,7 +297,7 @@ static int smb_disable(struct coresight_device *csdev)
struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
int ret = 0;
- mutex_lock(&drvdata->mutex);
+ spin_lock(&drvdata->spinlock);
if (drvdata->reading) {
ret = -EBUSY;
@@ -327,7 +320,7 @@ static int smb_disable(struct coresight_device *csdev)
dev_dbg(&csdev->dev, "Ultrasoc SMB disabled\n");
out:
- mutex_unlock(&drvdata->mutex);
+ spin_unlock(&drvdata->spinlock);
return ret;
}
@@ -408,7 +401,7 @@ static unsigned long smb_update_buffer(struct coresight_device *csdev,
if (!buf)
return 0;
- mutex_lock(&drvdata->mutex);
+ spin_lock(&drvdata->spinlock);
/* Don't do anything if another tracer is using this sink. */
if (atomic_read(&csdev->refcnt) != 1)
@@ -432,7 +425,7 @@ static unsigned long smb_update_buffer(struct coresight_device *csdev,
if (!buf->snapshot && lost)
perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
out:
- mutex_unlock(&drvdata->mutex);
+ spin_unlock(&drvdata->spinlock);
return data_size;
}
@@ -484,7 +477,6 @@ static int smb_init_data_buffer(struct platform_device *pdev,
static void smb_init_hw(struct smb_drv_data *drvdata)
{
smb_disable_hw(drvdata);
- smb_reset_buffer(drvdata);
writel(SMB_LB_CFG_LO_DEFAULT, drvdata->base + SMB_LB_CFG_LO_REG);
writel(SMB_LB_CFG_HI_DEFAULT, drvdata->base + SMB_LB_CFG_HI_REG);
@@ -590,37 +582,33 @@ static int smb_probe(struct platform_device *pdev)
return ret;
}
- mutex_init(&drvdata->mutex);
+ ret = smb_config_inport(dev, true);
+ if (ret)
+ return ret;
+
+ smb_reset_buffer(drvdata);
+ platform_set_drvdata(pdev, drvdata);
+ spin_lock_init(&drvdata->spinlock);
drvdata->pid = -1;
ret = smb_register_sink(pdev, drvdata);
if (ret) {
+ smb_config_inport(&pdev->dev, false);
dev_err(dev, "Failed to register SMB sink\n");
return ret;
}
- ret = smb_config_inport(dev, true);
- if (ret) {
- smb_unregister_sink(drvdata);
- return ret;
- }
-
- platform_set_drvdata(pdev, drvdata);
-
return 0;
}
static int smb_remove(struct platform_device *pdev)
{
struct smb_drv_data *drvdata = platform_get_drvdata(pdev);
- int ret;
-
- ret = smb_config_inport(&pdev->dev, false);
- if (ret)
- return ret;
smb_unregister_sink(drvdata);
+ smb_config_inport(&pdev->dev, false);
+
return 0;
}
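The mutex-to-spinlock switch changes what may happen inside the critical section: a spinlock cannot be held across sleeping calls such as copy_to_user(), which is why smb_read() above now runs unlocked and only the state flips stay protected. The resulting pattern, in sketch form:

spin_lock(&drvdata->spinlock);
drvdata->reading = true;		/* claim the buffer */
spin_unlock(&drvdata->spinlock);

/* sleeping work (copy_to_user() etc.) happens here, unlocked;
 * the 'reading' flag keeps the sink enable/disable paths away */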
diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.h b/drivers/hwtracing/coresight/ultrasoc-smb.h
index d2e14e8d2c8a..82a44c14a882 100644
--- a/drivers/hwtracing/coresight/ultrasoc-smb.h
+++ b/drivers/hwtracing/coresight/ultrasoc-smb.h
@@ -8,7 +8,7 @@
#define _ULTRASOC_SMB_H
#include <linux/miscdevice.h>
-#include <linux/mutex.h>
+#include <linux/spinlock.h>
/* Offset of SMB global registers */
#define SMB_GLB_CFG_REG 0x00
@@ -105,7 +105,7 @@ struct smb_data_buffer {
* @csdev: Component vitals needed by the framework.
* @sdb: Data buffer for SMB.
* @miscdev: Specifics to handle "/dev/xyz.smb" entry.
- * @mutex: Control data access to one at a time.
+ * @spinlock: Serialize access to the SMB data.
* @reading: Synchronise user space access to SMB buffer.
* @pid: Process ID of the process being monitored by the
* session that is using this component.
@@ -116,7 +116,7 @@ struct smb_drv_data {
struct coresight_device *csdev;
struct smb_data_buffer sdb;
struct miscdevice miscdev;
- struct mutex mutex;
+ spinlock_t spinlock;
bool reading;
pid_t pid;
enum cs_mode mode;
diff --git a/drivers/hwtracing/ptt/hisi_ptt.c b/drivers/hwtracing/ptt/hisi_ptt.c
index 49ea1b0f7489..a991ecb7515a 100644
--- a/drivers/hwtracing/ptt/hisi_ptt.c
+++ b/drivers/hwtracing/ptt/hisi_ptt.c
@@ -342,9 +342,9 @@ static int hisi_ptt_register_irq(struct hisi_ptt *hisi_ptt)
return ret;
hisi_ptt->trace_irq = pci_irq_vector(pdev, HISI_PTT_TRACE_DMA_IRQ);
- ret = devm_request_threaded_irq(&pdev->dev, hisi_ptt->trace_irq,
- NULL, hisi_ptt_isr, 0,
- DRV_NAME, hisi_ptt);
+ ret = devm_request_irq(&pdev->dev, hisi_ptt->trace_irq, hisi_ptt_isr,
+ IRQF_NOBALANCING | IRQF_NO_THREAD, DRV_NAME,
+ hisi_ptt);
if (ret) {
pci_err(pdev, "failed to request irq %d, ret = %d\n",
hisi_ptt->trace_irq, ret);
@@ -1000,6 +1000,9 @@ static int hisi_ptt_pmu_event_init(struct perf_event *event)
return -EOPNOTSUPP;
}
+ if (event->attach_state & PERF_ATTACH_TASK)
+ return -EOPNOTSUPP;
+
if (event->attr.type != hisi_ptt->hisi_ptt_pmu.type)
return -ENOENT;
@@ -1178,6 +1181,10 @@ static void hisi_ptt_pmu_del(struct perf_event *event, int flags)
hisi_ptt_pmu_stop(event, PERF_EF_UPDATE);
}
+static void hisi_ptt_pmu_read(struct perf_event *event)
+{
+}
+
static void hisi_ptt_remove_cpuhp_instance(void *hotplug_node)
{
cpuhp_state_remove_instance_nocalls(hisi_ptt_pmu_online, hotplug_node);
@@ -1221,6 +1228,7 @@ static int hisi_ptt_register_pmu(struct hisi_ptt *hisi_ptt)
.stop = hisi_ptt_pmu_stop,
.add = hisi_ptt_pmu_add,
.del = hisi_ptt_pmu_del,
+ .read = hisi_ptt_pmu_read,
};
reg = readl(hisi_ptt->iobase + HISI_PTT_LOCATION);
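The empty read callback added above is not cosmetic: the perf core invokes pmu->read() unconditionally (e.g. for PERF_EVENT_IOC_READ), so an AUX-only PMU with no counters still has to provide a stub:

/* Sketch of the minimal contract for an AUX-only PMU: */
static void aux_only_pmu_read(struct perf_event *event)
{
	/* nothing to count: all trace data flows through the AUX buffer */
}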
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index f9ab671c8eda..07c571c7b699 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -96,12 +96,6 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
return page_size;
}
- /* rdma_for_each_block() has a bug if the page size is smaller than the
- * page size used to build the umem. For now prevent smaller page sizes
- * from being returned.
- */
- pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT);
-
/* The best result is the smallest page size that results in the minimum
* number of required pages. Compute the largest page size that could
* work based on VA address bits that don't change.
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 8a6da87f464b..94a7f3b0c71c 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1971,7 +1971,7 @@ int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width)
int rc;
u32 netdev_speed;
struct net_device *netdev;
- struct ethtool_link_ksettings lksettings;
+ struct ethtool_link_ksettings lksettings = {};
if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
return -EINVAL;
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index f79369c8360a..a99c68247af0 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -71,7 +71,7 @@ static char version[] =
BNXT_RE_DESC "\n";
MODULE_AUTHOR("Eddie Wai <[email protected]>");
-MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
+MODULE_DESCRIPTION(BNXT_RE_DESC);
MODULE_LICENSE("Dual BSD/GPL");
/* globals */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 0cd2612a4987..2bca9560f32d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -4760,10 +4760,15 @@ static int check_cong_type(struct ib_qp *ibqp,
cong_alg->wnd_mode_sel = WND_LIMIT;
break;
default:
- ibdev_err(&hr_dev->ib_dev,
- "error type(%u) for congestion selection.\n",
- hr_dev->caps.cong_type);
- return -EINVAL;
+ ibdev_warn(&hr_dev->ib_dev,
+ "invalid type(%u) for congestion selection.\n",
+ hr_dev->caps.cong_type);
+ hr_dev->caps.cong_type = CONG_TYPE_DCQCN;
+ cong_alg->alg_sel = CONG_DCQCN;
+ cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
+ cong_alg->dip_vld = DIP_INVALID;
+ cong_alg->wnd_mode_sel = WND_LIMIT;
+ break;
}
return 0;
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 8fa7e4a18e73..bd4b2b896444 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -321,7 +321,11 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
break;
case IRDMA_AE_QP_SUSPEND_COMPLETE:
if (iwqp->iwdev->vsi.tc_change_pending) {
- atomic_dec(&iwqp->sc_qp.vsi->qp_suspend_reqs);
+ if (!atomic_dec_return(&qp->vsi->qp_suspend_reqs))
+ wake_up(&iwqp->iwdev->suspend_wq);
+ }
+ if (iwqp->suspend_pending) {
+ iwqp->suspend_pending = false;
wake_up(&iwqp->iwdev->suspend_wq);
}
break;
@@ -581,9 +585,6 @@ static void irdma_destroy_cqp(struct irdma_pci_f *rf)
struct irdma_cqp *cqp = &rf->cqp;
int status = 0;
- if (rf->cqp_cmpl_wq)
- destroy_workqueue(rf->cqp_cmpl_wq);
-
status = irdma_sc_cqp_destroy(dev->cqp);
if (status)
ibdev_dbg(to_ibdev(dev), "ERR: Destroy CQP failed %d\n", status);
@@ -748,6 +749,9 @@ static void irdma_destroy_ccq(struct irdma_pci_f *rf)
struct irdma_ccq *ccq = &rf->ccq;
int status = 0;
+ if (rf->cqp_cmpl_wq)
+ destroy_workqueue(rf->cqp_cmpl_wq);
+
if (!rf->reset)
status = irdma_sc_ccq_destroy(dev->ccq, 0, true);
if (status)
@@ -1180,7 +1184,6 @@ static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
int status;
struct irdma_ceq_init_info info = {};
struct irdma_sc_dev *dev = &rf->sc_dev;
- u64 scratch;
u32 ceq_size;
info.ceq_id = ceq_id;
@@ -1201,14 +1204,13 @@ static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
iwceq->sc_ceq.ceq_id = ceq_id;
info.dev = dev;
info.vsi = vsi;
- scratch = (uintptr_t)&rf->cqp.sc_cqp;
status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
if (!status) {
if (dev->ceq_valid)
status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
IRDMA_OP_CEQ_CREATE);
else
- status = irdma_sc_cceq_create(&iwceq->sc_ceq, scratch);
+ status = irdma_sc_cceq_create(&iwceq->sc_ceq, 0);
}
if (status) {
diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
index 9ac48b4dab41..3f13200ff71b 100644
--- a/drivers/infiniband/hw/irdma/main.c
+++ b/drivers/infiniband/hw/irdma/main.c
@@ -48,7 +48,7 @@ static void irdma_prep_tc_change(struct irdma_device *iwdev)
/* Wait for all qp's to suspend */
wait_event_timeout(iwdev->suspend_wq,
!atomic_read(&iwdev->vsi.qp_suspend_reqs),
- IRDMA_EVENT_TIMEOUT);
+ msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS));
irdma_ws_reset(&iwdev->vsi);
}
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index d66d87bb8bc4..b65bc2ea542f 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -78,7 +78,7 @@ extern struct auxiliary_driver i40iw_auxiliary_drv;
#define MAX_DPC_ITERATIONS 128
-#define IRDMA_EVENT_TIMEOUT 50000
+#define IRDMA_EVENT_TIMEOUT_MS 5000
#define IRDMA_VCHNL_EVENT_TIMEOUT 100000
#define IRDMA_RST_TIMEOUT_HZ 4
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 2138f0a2ff85..b5eb8d421988 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -1157,6 +1157,21 @@ exit:
return prio;
}
+static int irdma_wait_for_suspend(struct irdma_qp *iwqp)
+{
+ if (!wait_event_timeout(iwqp->iwdev->suspend_wq,
+ !iwqp->suspend_pending,
+ msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS))) {
+ iwqp->suspend_pending = false;
+ ibdev_warn(&iwqp->iwdev->ibdev,
+ "modify_qp timed out waiting for suspend. qp_id = %d, last_ae = 0x%x\n",
+ iwqp->ibqp.qp_num, iwqp->last_aeq);
+ return -EBUSY;
+ }
+
+ return 0;
+}
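Note the wait_event_timeout() convention the new helper relies on: it returns 0 only when the timeout elapsed with the condition still false, and the remaining jiffies otherwise. A minimal waiter/waker pairing:

static DECLARE_WAIT_QUEUE_HEAD(wq);
static bool pending = true;

static int waiter(void)
{
	if (!wait_event_timeout(wq, !pending, msecs_to_jiffies(5000)))
		return -EBUSY;	/* timed out, condition still false */
	return 0;
}

static void waker(void)	/* e.g. called from an event handler */
{
	pending = false;
	wake_up(&wq);
}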
+
/**
* irdma_modify_qp_roce - modify qp request
* @ibqp: qp's pointer for modify
@@ -1420,17 +1435,11 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
info.next_iwarp_state = IRDMA_QP_STATE_SQD;
issue_modify_qp = 1;
+ iwqp->suspend_pending = true;
break;
case IB_QPS_SQE:
case IB_QPS_ERR:
case IB_QPS_RESET:
- if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) {
- spin_unlock_irqrestore(&iwqp->lock, flags);
- info.next_iwarp_state = IRDMA_QP_STATE_SQD;
- irdma_hw_modify_qp(iwdev, iwqp, &info, true);
- spin_lock_irqsave(&iwqp->lock, flags);
- }
-
if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
spin_unlock_irqrestore(&iwqp->lock, flags);
if (udata && udata->inlen) {
@@ -1467,6 +1476,11 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
ctx_info->rem_endpoint_idx = udp_info->arp_idx;
if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
return -EINVAL;
+ if (info.next_iwarp_state == IRDMA_QP_STATE_SQD) {
+ ret = irdma_wait_for_suspend(iwqp);
+ if (ret)
+ return ret;
+ }
spin_lock_irqsave(&iwqp->lock, flags);
if (iwqp->iwarp_state == info.curr_iwarp_state) {
iwqp->iwarp_state = info.next_iwarp_state;
@@ -2900,7 +2914,7 @@ static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
iwmr->type = reg_type;
pgsz_bitmap = (reg_type == IRDMA_MEMREG_TYPE_MEM) ?
- iwdev->rf->sc_dev.hw_attrs.page_size_cap : PAGE_SIZE;
+ iwdev->rf->sc_dev.hw_attrs.page_size_cap : SZ_4K;
iwmr->page_size = ib_umem_find_best_pgsz(region, pgsz_bitmap, virt);
if (unlikely(!iwmr->page_size)) {
@@ -2932,6 +2946,11 @@ static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
int err;
u8 lvl;
+ /* iWarp: Catch page not starting on OS page boundary */
+ if (!rdma_protocol_roce(&iwdev->ibdev, 1) &&
+ ib_umem_offset(iwmr->region))
+ return -EINVAL;
+
total = req.sq_pages + req.rq_pages + 1;
if (total > iwmr->page_cnt)
return -EINVAL;
diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
index c42ac22de00e..cfa140b36395 100644
--- a/drivers/infiniband/hw/irdma/verbs.h
+++ b/drivers/infiniband/hw/irdma/verbs.h
@@ -198,6 +198,7 @@ struct irdma_qp {
u8 flush_issued : 1;
u8 sig_all : 1;
u8 pau_mode : 1;
+ u8 suspend_pending : 1;
u8 rsvd : 1;
u8 iwarp_state;
u16 term_sq_flush_code;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 07261523c554..7f3167ce2972 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -384,7 +384,7 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
struct rtrs_clt_path *clt_path;
int err;
- if (WARN_ON(!req->in_use))
+ if (!req->in_use)
return;
if (WARN_ON(!req->con))
return;
@@ -1699,7 +1699,7 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
clt_path->s.dev_ref++;
max_send_wr = min_t(int, wr_limit,
/* QD * (REQ + RSP + FR REGS or INVS) + drain */
- clt_path->queue_depth * 3 + 1);
+ clt_path->queue_depth * 4 + 1);
max_recv_wr = min_t(int, wr_limit,
clt_path->queue_depth * 3 + 1);
max_send_sge = 2;
@@ -2350,8 +2350,6 @@ static int init_conns(struct rtrs_clt_path *clt_path)
if (err)
goto destroy;
- rtrs_start_hb(&clt_path->s);
-
return 0;
destroy:
@@ -2625,6 +2623,7 @@ static int init_path(struct rtrs_clt_path *clt_path)
goto out;
}
rtrs_clt_path_up(clt_path);
+ rtrs_start_hb(&clt_path->s);
out:
mutex_unlock(&clt_path->init_mutex);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index 75e56604e462..1d33efb8fb03 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -65,8 +65,9 @@ static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
{
enum rtrs_srv_state old_state;
bool changed = false;
+ unsigned long flags;
- spin_lock_irq(&srv_path->state_lock);
+ spin_lock_irqsave(&srv_path->state_lock, flags);
old_state = srv_path->state;
switch (new_state) {
case RTRS_SRV_CONNECTED:
@@ -87,7 +88,7 @@ static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
}
if (changed)
srv_path->state = new_state;
- spin_unlock_irq(&srv_path->state_lock);
+ spin_unlock_irqrestore(&srv_path->state_lock, flags);
return changed;
}
@@ -550,7 +551,10 @@ static void unmap_cont_bufs(struct rtrs_srv_path *srv_path)
struct rtrs_srv_mr *srv_mr;
srv_mr = &srv_path->mrs[i];
- rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
+
+ if (always_invalidate)
+ rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
+
ib_dereg_mr(srv_mr->mr);
ib_dma_unmap_sg(srv_path->s.dev->ib_dev, srv_mr->sgt.sgl,
srv_mr->sgt.nents, DMA_BIDIRECTIONAL);
@@ -709,20 +713,23 @@ static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
WARN_ON(wc->opcode != IB_WC_SEND);
}
-static void rtrs_srv_path_up(struct rtrs_srv_path *srv_path)
+static int rtrs_srv_path_up(struct rtrs_srv_path *srv_path)
{
struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_srv_ctx *ctx = srv->ctx;
- int up;
+ int up, ret = 0;
mutex_lock(&srv->paths_ev_mutex);
up = ++srv->paths_up;
if (up == 1)
- ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL);
+ ret = ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL);
mutex_unlock(&srv->paths_ev_mutex);
/* Mark session as established */
- srv_path->established = true;
+ if (!ret)
+ srv_path->established = true;
+
+ return ret;
}
static void rtrs_srv_path_down(struct rtrs_srv_path *srv_path)
@@ -851,7 +858,12 @@ static int process_info_req(struct rtrs_srv_con *con,
goto iu_free;
kobject_get(&srv_path->kobj);
get_device(&srv_path->srv->dev);
- rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED);
+ err = rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED);
+ if (!err) {
+ rtrs_err(s, "rtrs_srv_change_state(), err: %d\n", err);
+ goto iu_free;
+ }
+
rtrs_srv_start_hb(srv_path);
/*
@@ -860,7 +872,11 @@ static int process_info_req(struct rtrs_srv_con *con,
* all connections are successfully established. Thus, simply notify
* listener with a proper event if we are the first path.
*/
- rtrs_srv_path_up(srv_path);
+ err = rtrs_srv_path_up(srv_path);
+ if (err) {
+ rtrs_err(s, "rtrs_srv_path_up(), err: %d\n", err);
+ goto iu_free;
+ }
ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
tx_iu->dma_addr,
@@ -1516,7 +1532,6 @@ static void rtrs_srv_close_work(struct work_struct *work)
srv_path = container_of(work, typeof(*srv_path), close_work);
- rtrs_srv_destroy_path_files(srv_path);
rtrs_srv_stop_hb(srv_path);
for (i = 0; i < srv_path->s.con_num; i++) {
@@ -1536,6 +1551,8 @@ static void rtrs_srv_close_work(struct work_struct *work)
/* Wait for all completion */
wait_for_completion(&srv_path->complete_done);
+ rtrs_srv_destroy_path_files(srv_path);
+
/* Notify upper layer if we are the last path */
rtrs_srv_path_down(srv_path);
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index a3414afe11b0..23cb80d62a9a 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1522,6 +1522,15 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
{
struct qi_desc desc;
+ /*
+ * VT-d spec, section 4.3:
+ *
+ * Software is recommended to not submit any Device-TLB invalidation
+ * requests while address remapping hardware is disabled.
+ */
+ if (!(iommu->gcmd & DMA_GCMD_TE))
+ return;
+
if (mask) {
addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
@@ -1587,6 +1596,15 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
+ /*
+ * VT-d spec, section 4.3:
+ *
+ * Software is recommended to not submit any Device-TLB invalidation
+ * requests while address remapping hardware is disabled.
+ */
+ if (!(iommu->gcmd & DMA_GCMD_TE))
+ return;
+
desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
QI_DEV_IOTLB_PFSID(pfsid);
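Both hunks open with the same guard; factored out it would look like the helper below (name hypothetical). The gcmd field mirrors the global command register, and DMA_GCMD_TE is the translation-enable bit the quoted VT-d spec text refers to.

static inline bool qi_remapping_enabled(struct intel_iommu *iommu)
{
	return iommu->gcmd & DMA_GCMD_TE;
}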
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 3531b956556c..897159dba47d 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -299,7 +299,7 @@ static int iommu_skip_te_disable;
#define IDENTMAP_AZALIA 4
const struct iommu_ops intel_iommu_ops;
-const struct iommu_dirty_ops intel_dirty_ops;
+static const struct iommu_dirty_ops intel_dirty_ops;
static bool translation_pre_enabled(struct intel_iommu *iommu)
{
@@ -2207,6 +2207,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
attr |= DMA_FL_PTE_DIRTY;
}
+ domain->has_mappings = true;
+
pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
while (nr_pages > 0) {
@@ -2490,7 +2492,8 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
return ret;
}
- iommu_enable_pci_caps(info);
+ if (sm_supported(info->iommu) || !domain_type_is_si(info->domain))
+ iommu_enable_pci_caps(info);
return 0;
}
@@ -3925,8 +3928,8 @@ static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *op
*/
static void domain_context_clear(struct device_domain_info *info)
{
- if (!info->iommu || !info->dev || !dev_is_pci(info->dev))
- return;
+ if (!dev_is_pci(info->dev))
+ domain_context_clear_one(info, info->bus, info->devfn);
pci_for_each_dma_alias(to_pci_dev(info->dev),
&domain_context_clear_one_cb, info);
@@ -4360,7 +4363,8 @@ static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
return true;
spin_lock_irqsave(&dmar_domain->lock, flags);
- if (!domain_support_force_snooping(dmar_domain)) {
+ if (!domain_support_force_snooping(dmar_domain) ||
+ (!dmar_domain->use_first_level && dmar_domain->has_mappings)) {
spin_unlock_irqrestore(&dmar_domain->lock, flags);
return false;
}
@@ -4925,7 +4929,7 @@ static int intel_iommu_read_and_clear_dirty(struct iommu_domain *domain,
return 0;
}
-const struct iommu_dirty_ops intel_dirty_ops = {
+static const struct iommu_dirty_ops intel_dirty_ops = {
.set_dirty_tracking = intel_iommu_set_dirty_tracking,
.read_and_clear_dirty = intel_iommu_read_and_clear_dirty,
};
@@ -5073,7 +5077,7 @@ static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
ver = (dev->device >> 8) & 0xff;
if (ver != 0x45 && ver != 0x46 && ver != 0x4c &&
ver != 0x4e && ver != 0x8a && ver != 0x98 &&
- ver != 0x9a && ver != 0xa7)
+ ver != 0x9a && ver != 0xa7 && ver != 0x7d)
return;
if (risky_device(dev))
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 65d37a138c75..ce030c5b5772 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -602,6 +602,9 @@ struct dmar_domain {
*/
u8 dirty_tracking:1; /* Dirty tracking is enabled */
u8 nested_parent:1; /* Has other domains nested on it */
+ u8 has_mappings:1; /* Has mappings configured through
+ * iommu_map() interface.
+ */
spinlock_t lock; /* Protect device tracking lists */
struct list_head devices; /* all devices' list */
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 50a481c895b8..ac12f76c1212 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -216,6 +216,27 @@ static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
rcu_read_unlock();
}
+static void intel_flush_svm_all(struct intel_svm *svm)
+{
+ struct device_domain_info *info;
+ struct intel_svm_dev *sdev;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdev, &svm->devs, list) {
+ info = dev_iommu_priv_get(sdev->dev);
+
+ qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, 0, -1UL, 0);
+ if (info->ats_enabled) {
+ qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
+ svm->pasid, sdev->qdep,
+ 0, 64 - VTD_PAGE_SHIFT);
+ quirk_extra_dev_tlb_flush(info, 0, 64 - VTD_PAGE_SHIFT,
+ svm->pasid, sdev->qdep);
+ }
+ }
+ rcu_read_unlock();
+}
+
/* Pages have been freed at this point */
static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
struct mm_struct *mm,
@@ -223,6 +244,11 @@ static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
{
struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
+ if (start == 0 && end == -1UL) {
+ intel_flush_svm_all(svm);
+ return;
+ }
+
intel_flush_svm_range(svm, start,
(end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
}
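The full-range special case avoids a wrap-around in the page-count arithmetic: an mmu_notifier full-space invalidation arrives as start == 0, end == -1UL, and feeding that into the ranged flush computes

/*
 *   (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT
 * = (0xffffffffffffffff + 0xfff) >> 12		(4K pages, mod 2^64)
 * = 0xffe >> 12
 * = 0 pages
 *
 * i.e. nothing would be flushed, hence the explicit flush-all path.
 */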
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index f17a1113f3d6..33e2a9b5d339 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -485,11 +485,12 @@ static void iommu_deinit_device(struct device *dev)
dev_iommu_free(dev);
}
+DEFINE_MUTEX(iommu_probe_device_lock);
+
static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
struct iommu_group *group;
- static DEFINE_MUTEX(iommu_probe_device_lock);
struct group_device *gdev;
int ret;
@@ -502,17 +503,15 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
* probably be able to use device_lock() here to minimise the scope,
* but for now enforcing a simple global ordering is fine.
*/
- mutex_lock(&iommu_probe_device_lock);
+ lockdep_assert_held(&iommu_probe_device_lock);
/* Device is probed already if in a group */
- if (dev->iommu_group) {
- ret = 0;
- goto out_unlock;
- }
+ if (dev->iommu_group)
+ return 0;
ret = iommu_init_device(dev, ops);
if (ret)
- goto out_unlock;
+ return ret;
group = dev->iommu_group;
gdev = iommu_group_alloc_device(group, dev);
@@ -548,7 +547,6 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
list_add_tail(&group->entry, group_list);
}
mutex_unlock(&group->mutex);
- mutex_unlock(&iommu_probe_device_lock);
if (dev_is_pci(dev))
iommu_dma_set_pci_32bit_workaround(dev);
@@ -562,8 +560,6 @@ err_put_group:
iommu_deinit_device(dev);
mutex_unlock(&group->mutex);
iommu_group_put(group);
-out_unlock:
- mutex_unlock(&iommu_probe_device_lock);
return ret;
}
@@ -573,7 +569,9 @@ int iommu_probe_device(struct device *dev)
const struct iommu_ops *ops;
int ret;
+ mutex_lock(&iommu_probe_device_lock);
ret = __iommu_probe_device(dev, NULL);
+ mutex_unlock(&iommu_probe_device_lock);
if (ret)
return ret;
@@ -1788,7 +1786,7 @@ iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
*/
if (ops->default_domain) {
if (req_type)
- return NULL;
+ return ERR_PTR(-EINVAL);
return ops->default_domain;
}
@@ -1797,15 +1795,15 @@ iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
/* The driver gave no guidance on what type to use, try the default */
dom = __iommu_group_alloc_default_domain(group, iommu_def_domain_type);
- if (dom)
+ if (!IS_ERR(dom))
return dom;
/* Otherwise IDENTITY and DMA_FQ defaults will try DMA */
if (iommu_def_domain_type == IOMMU_DOMAIN_DMA)
- return NULL;
+ return ERR_PTR(-EINVAL);
dom = __iommu_group_alloc_default_domain(group, IOMMU_DOMAIN_DMA);
- if (!dom)
- return NULL;
+ if (IS_ERR(dom))
+ return dom;
pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
iommu_def_domain_type, group->name);
@@ -1822,7 +1820,9 @@ static int probe_iommu_group(struct device *dev, void *data)
struct list_head *group_list = data;
int ret;
+ mutex_lock(&iommu_probe_device_lock);
ret = __iommu_probe_device(dev, group_list);
+ mutex_unlock(&iommu_probe_device_lock);
if (ret == -ENODEV)
ret = 0;
@@ -2094,10 +2094,17 @@ static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
else if (ops->domain_alloc)
domain = ops->domain_alloc(alloc_type);
else
- return NULL;
+ return ERR_PTR(-EOPNOTSUPP);
+ /*
+ * Many domain_alloc ops now return ERR_PTR, make things easier for the
+ * driver by accepting ERR_PTR from all domain_alloc ops instead of
+ * having two rules.
+ */
+ if (IS_ERR(domain))
+ return domain;
if (!domain)
- return NULL;
+ return ERR_PTR(-ENOMEM);
domain->type = type;
/*
@@ -2110,9 +2117,14 @@ static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
if (!domain->ops)
domain->ops = ops->default_domain_ops;
- if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) {
- iommu_domain_free(domain);
- domain = NULL;
+ if (iommu_is_dma_domain(domain)) {
+ int rc;
+
+ rc = iommu_get_dma_cookie(domain);
+ if (rc) {
+ iommu_domain_free(domain);
+ return ERR_PTR(rc);
+ }
}
return domain;
}
@@ -2129,10 +2141,15 @@ __iommu_group_domain_alloc(struct iommu_group *group, unsigned int type)
struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
{
+ struct iommu_domain *domain;
+
if (bus == NULL || bus->iommu_ops == NULL)
return NULL;
- return __iommu_domain_alloc(bus->iommu_ops, NULL,
+ domain = __iommu_domain_alloc(bus->iommu_ops, NULL,
IOMMU_DOMAIN_UNMANAGED);
+ if (IS_ERR(domain))
+ return NULL;
+ return domain;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);
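After this conversion the internal allocators report failures as ERR_PTR() codes and only the exported legacy entry point folds them back to NULL. Callers inside the core therefore follow the usual convention:

struct iommu_domain *dom;

dom = __iommu_group_domain_alloc(group, IOMMU_DOMAIN_DMA);
if (IS_ERR(dom))
	return PTR_ERR(dom);	/* e.g. -ENOMEM or -EOPNOTSUPP */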
@@ -3041,8 +3058,8 @@ static int iommu_setup_default_domain(struct iommu_group *group,
return -EINVAL;
dom = iommu_group_alloc_default_domain(group, req_type);
- if (!dom)
- return -ENODEV;
+ if (IS_ERR(dom))
+ return PTR_ERR(dom);
if (group->default_domain == dom)
return 0;
@@ -3243,21 +3260,23 @@ void iommu_device_unuse_default_domain(struct device *dev)
static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
{
+ struct iommu_domain *domain;
+
if (group->blocking_domain)
return 0;
- group->blocking_domain =
- __iommu_group_domain_alloc(group, IOMMU_DOMAIN_BLOCKED);
- if (!group->blocking_domain) {
+ domain = __iommu_group_domain_alloc(group, IOMMU_DOMAIN_BLOCKED);
+ if (IS_ERR(domain)) {
/*
* For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED
* create an empty domain instead.
*/
- group->blocking_domain = __iommu_group_domain_alloc(
- group, IOMMU_DOMAIN_UNMANAGED);
- if (!group->blocking_domain)
- return -EINVAL;
+ domain = __iommu_group_domain_alloc(group,
+ IOMMU_DOMAIN_UNMANAGED);
+ if (IS_ERR(domain))
+ return PTR_ERR(domain);
}
+ group->blocking_domain = domain;
return 0;
}
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index 59d3a07300d9..873630c111c1 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -571,7 +571,7 @@ iommufd_device_auto_get_domain(struct iommufd_device *idev,
continue;
destroy_hwpt = (*do_attach)(idev, hwpt);
if (IS_ERR(destroy_hwpt)) {
- iommufd_put_object(&hwpt->obj);
+ iommufd_put_object(idev->ictx, &hwpt->obj);
/*
* -EINVAL means the domain is incompatible with the
* device. Other error codes should propagate to
@@ -583,7 +583,7 @@ iommufd_device_auto_get_domain(struct iommufd_device *idev,
goto out_unlock;
}
*pt_id = hwpt->obj.id;
- iommufd_put_object(&hwpt->obj);
+ iommufd_put_object(idev->ictx, &hwpt->obj);
goto out_unlock;
}
@@ -652,7 +652,7 @@ static int iommufd_device_change_pt(struct iommufd_device *idev, u32 *pt_id,
destroy_hwpt = ERR_PTR(-EINVAL);
goto out_put_pt_obj;
}
- iommufd_put_object(pt_obj);
+ iommufd_put_object(idev->ictx, pt_obj);
/* This destruction has to be after we unlock everything */
if (destroy_hwpt)
@@ -660,7 +660,7 @@ static int iommufd_device_change_pt(struct iommufd_device *idev, u32 *pt_id,
return 0;
out_put_pt_obj:
- iommufd_put_object(pt_obj);
+ iommufd_put_object(idev->ictx, pt_obj);
return PTR_ERR(destroy_hwpt);
}
@@ -792,7 +792,7 @@ static int iommufd_access_change_ioas_id(struct iommufd_access *access, u32 id)
if (IS_ERR(ioas))
return PTR_ERR(ioas);
rc = iommufd_access_change_ioas(access, ioas);
- iommufd_put_object(&ioas->obj);
+ iommufd_put_object(access->ictx, &ioas->obj);
return rc;
}
@@ -941,7 +941,7 @@ void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
access->ops->unmap(access->data, iova, length);
- iommufd_put_object(&access->obj);
+ iommufd_put_object(access->ictx, &access->obj);
xa_lock(&ioas->iopt.access_list);
}
xa_unlock(&ioas->iopt.access_list);
@@ -1243,6 +1243,6 @@ int iommufd_get_hw_info(struct iommufd_ucmd *ucmd)
out_free:
kfree(data);
out_put:
- iommufd_put_object(&idev->obj);
+ iommufd_put_object(ucmd->ictx, &idev->obj);
return rc;
}
diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
index 2abbeafdbd22..cbb5df0a6c32 100644
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -318,9 +318,9 @@ out_unlock:
if (ioas)
mutex_unlock(&ioas->mutex);
out_put_pt:
- iommufd_put_object(pt_obj);
+ iommufd_put_object(ucmd->ictx, pt_obj);
out_put_idev:
- iommufd_put_object(&idev->obj);
+ iommufd_put_object(ucmd->ictx, &idev->obj);
return rc;
}
@@ -345,7 +345,7 @@ int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd)
rc = iopt_set_dirty_tracking(&ioas->iopt, hwpt_paging->common.domain,
enable);
- iommufd_put_object(&hwpt_paging->common.obj);
+ iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
return rc;
}
@@ -368,6 +368,6 @@ int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd)
rc = iopt_read_and_clear_dirty_data(
&ioas->iopt, hwpt_paging->common.domain, cmd->flags, cmd);
- iommufd_put_object(&hwpt_paging->common.obj);
+ iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
return rc;
}
diff --git a/drivers/iommu/iommufd/ioas.c b/drivers/iommu/iommufd/ioas.c
index d5624577f79f..742248276548 100644
--- a/drivers/iommu/iommufd/ioas.c
+++ b/drivers/iommu/iommufd/ioas.c
@@ -105,7 +105,7 @@ int iommufd_ioas_iova_ranges(struct iommufd_ucmd *ucmd)
rc = -EMSGSIZE;
out_put:
up_read(&ioas->iopt.iova_rwsem);
- iommufd_put_object(&ioas->obj);
+ iommufd_put_object(ucmd->ictx, &ioas->obj);
return rc;
}
@@ -175,7 +175,7 @@ out_free:
interval_tree_remove(node, &allowed_iova);
kfree(container_of(node, struct iopt_allowed, node));
}
- iommufd_put_object(&ioas->obj);
+ iommufd_put_object(ucmd->ictx, &ioas->obj);
return rc;
}
@@ -228,7 +228,7 @@ int iommufd_ioas_map(struct iommufd_ucmd *ucmd)
cmd->iova = iova;
rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
out_put:
- iommufd_put_object(&ioas->obj);
+ iommufd_put_object(ucmd->ictx, &ioas->obj);
return rc;
}
@@ -258,7 +258,7 @@ int iommufd_ioas_copy(struct iommufd_ucmd *ucmd)
return PTR_ERR(src_ioas);
rc = iopt_get_pages(&src_ioas->iopt, cmd->src_iova, cmd->length,
&pages_list);
- iommufd_put_object(&src_ioas->obj);
+ iommufd_put_object(ucmd->ictx, &src_ioas->obj);
if (rc)
return rc;
@@ -279,7 +279,7 @@ int iommufd_ioas_copy(struct iommufd_ucmd *ucmd)
cmd->dst_iova = iova;
rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
out_put_dst:
- iommufd_put_object(&dst_ioas->obj);
+ iommufd_put_object(ucmd->ictx, &dst_ioas->obj);
out_pages:
iopt_free_pages_list(&pages_list);
return rc;
@@ -315,7 +315,7 @@ int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd)
rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
out_put:
- iommufd_put_object(&ioas->obj);
+ iommufd_put_object(ucmd->ictx, &ioas->obj);
return rc;
}
@@ -393,6 +393,6 @@ int iommufd_ioas_option(struct iommufd_ucmd *ucmd)
rc = -EOPNOTSUPP;
}
- iommufd_put_object(&ioas->obj);
+ iommufd_put_object(ucmd->ictx, &ioas->obj);
return rc;
}
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index a74cfefffbc6..abae041e256f 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -21,6 +21,7 @@ struct iommufd_ctx {
struct file *file;
struct xarray objects;
struct xarray groups;
+ wait_queue_head_t destroy_wait;
u8 account_mode;
/* Compatibility with VFIO no iommu */
@@ -135,7 +136,7 @@ enum iommufd_object_type {
/* Base struct for all objects with a userspace ID handle. */
struct iommufd_object {
- struct rw_semaphore destroy_rwsem;
+ refcount_t shortterm_users;
refcount_t users;
enum iommufd_object_type type;
unsigned int id;
@@ -143,10 +144,15 @@ struct iommufd_object {
static inline bool iommufd_lock_obj(struct iommufd_object *obj)
{
- if (!down_read_trylock(&obj->destroy_rwsem))
+ if (!refcount_inc_not_zero(&obj->users))
return false;
- if (!refcount_inc_not_zero(&obj->users)) {
- up_read(&obj->destroy_rwsem);
+ if (!refcount_inc_not_zero(&obj->shortterm_users)) {
+ /*
+ * If the caller doesn't already have a ref on obj this must be
+ * called under the xa_lock. Otherwise the caller is holding a
+ * ref on users. Thus it cannot be one before this decrement.
+ */
+ refcount_dec(&obj->users);
return false;
}
return true;
@@ -154,10 +160,16 @@ static inline bool iommufd_lock_obj(struct iommufd_object *obj)
struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id,
enum iommufd_object_type type);
-static inline void iommufd_put_object(struct iommufd_object *obj)
+static inline void iommufd_put_object(struct iommufd_ctx *ictx,
+ struct iommufd_object *obj)
{
+ /*
+ * Users first, then shortterm so that REMOVE_WAIT_SHORTTERM never sees
+ * a spurious !0 users with a 0 shortterm_users.
+ */
refcount_dec(&obj->users);
- up_read(&obj->destroy_rwsem);
+ if (refcount_dec_and_test(&obj->shortterm_users))
+ wake_up_interruptible_all(&ictx->destroy_wait);
}
void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj);
@@ -165,17 +177,49 @@ void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
struct iommufd_object *obj);
void iommufd_object_finalize(struct iommufd_ctx *ictx,
struct iommufd_object *obj);
-void __iommufd_object_destroy_user(struct iommufd_ctx *ictx,
- struct iommufd_object *obj, bool allow_fail);
+
+enum {
+ REMOVE_WAIT_SHORTTERM = 1,
+};
+int iommufd_object_remove(struct iommufd_ctx *ictx,
+ struct iommufd_object *to_destroy, u32 id,
+ unsigned int flags);
+
+/*
+ * The caller holds a users refcount and wants to destroy the object. At this
+ * point the caller has no shortterm_users reference and at least the xarray
+ * will be holding one.
+ */
static inline void iommufd_object_destroy_user(struct iommufd_ctx *ictx,
struct iommufd_object *obj)
{
- __iommufd_object_destroy_user(ictx, obj, false);
+ int ret;
+
+ ret = iommufd_object_remove(ictx, obj, obj->id, REMOVE_WAIT_SHORTTERM);
+
+ /*
+ * If there is a bug and we couldn't destroy the object then we did put
+ * back the caller's users refcount and will eventually try to free it
+ * again during close.
+ */
+ WARN_ON(ret);
}
-static inline void iommufd_object_deref_user(struct iommufd_ctx *ictx,
- struct iommufd_object *obj)
+
+/*
+ * The HWPT allocated by autodomains is used in possibly many devices and
+ * is automatically destroyed when its refcount reaches zero.
+ *
+ * If userspace uses the HWPT manually, even for a short term, then it will
+ * disrupt this refcounting and the auto-free in the kernel will not work.
+ * Userspace that tries to use the automatically allocated HWPT must be careful
+ * to ensure that it is consistently destroyed, eg by not racing accesses
+ * and by not attaching an automatic HWPT to a device manually.
+ */
+static inline void
+iommufd_object_put_and_try_destroy(struct iommufd_ctx *ictx,
+ struct iommufd_object *obj)
{
- __iommufd_object_destroy_user(ictx, obj, true);
+ iommufd_object_remove(ictx, obj, obj->id, 0);
}
struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
@@ -311,7 +355,7 @@ static inline void iommufd_hw_pagetable_put(struct iommufd_ctx *ictx,
lockdep_assert_not_held(&hwpt_paging->ioas->mutex);
if (hwpt_paging->auto_domain) {
- iommufd_object_deref_user(ictx, &hwpt->obj);
+ iommufd_object_put_and_try_destroy(ictx, &hwpt->obj);
return;
}
}
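Taken together, the two counters give every temporary user a deterministic lifetime: iommufd_lock_obj() bumps both users and shortterm_users, and the destroy path can wait for the short-term side to drain. A typical ioctl-scope pairing with the helpers above:

struct iommufd_object *obj;

obj = iommufd_get_object(ictx, id, IOMMUFD_OBJ_IOAS);	/* +users, +shortterm_users */
if (IS_ERR(obj))
	return PTR_ERR(obj);

/* ... use the object for the duration of the ioctl ... */

iommufd_put_object(ictx, obj);	/* drops both; may wake a waiting destroyer */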
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index 45b9d40773b1..c9091e46d208 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -33,7 +33,6 @@ struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
size_t size,
enum iommufd_object_type type)
{
- static struct lock_class_key obj_keys[IOMMUFD_OBJ_MAX];
struct iommufd_object *obj;
int rc;
@@ -41,15 +40,8 @@ struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
if (!obj)
return ERR_PTR(-ENOMEM);
obj->type = type;
- /*
- * In most cases the destroy_rwsem is obtained with try so it doesn't
- * interact with lockdep, however on destroy we have to sleep. This
- * means if we have to destroy an object while holding a get on another
- * object it triggers lockdep. Using one locking class per object type
- * is a simple and reasonable way to avoid this.
- */
- __init_rwsem(&obj->destroy_rwsem, "iommufd_object::destroy_rwsem",
- &obj_keys[type]);
+ /* Starts out biased by 1 until it is removed from the xarray */
+ refcount_set(&obj->shortterm_users, 1);
refcount_set(&obj->users, 1);
/*
@@ -129,92 +121,113 @@ struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id,
return obj;
}
+static int iommufd_object_dec_wait_shortterm(struct iommufd_ctx *ictx,
+ struct iommufd_object *to_destroy)
+{
+ if (refcount_dec_and_test(&to_destroy->shortterm_users))
+ return 0;
+
+ if (wait_event_timeout(ictx->destroy_wait,
+ refcount_read(&to_destroy->shortterm_users) ==
+ 0,
+ msecs_to_jiffies(10000)))
+ return 0;
+
+ pr_crit("Time out waiting for iommufd object to become free\n");
+ refcount_inc(&to_destroy->shortterm_users);
+ return -EBUSY;
+}
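/*
 * The helper above follows a dec-then-timed-wait shape: drop our
 * reference; if we were not the last holder, wait with a deadline; on
 * timeout, re-take the reference and report busy. A rough POSIX
 * equivalent (illustrative; the kernel uses refcount_t plus
 * wait_event_timeout(), not a condvar):
 */
#include <errno.h>
#include <pthread.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t destroy_wait = PTHREAD_COND_INITIALIZER;
static int shortterm_users;

static int dec_wait_shortterm(int timeout_secs)
{
	struct timespec dl;
	int rc = 0;

	clock_gettime(CLOCK_REALTIME, &dl);
	dl.tv_sec += timeout_secs;

	pthread_mutex_lock(&lock);
	if (--shortterm_users == 0)
		goto out;			/* we were the last holder */
	while (shortterm_users && rc != ETIMEDOUT)
		rc = pthread_cond_timedwait(&destroy_wait, &lock, &dl);
	if (shortterm_users) {
		shortterm_users++;		/* put our reference back */
		rc = EBUSY;			/* mirrors the -EBUSY above */
	} else {
		rc = 0;
	}
out:
	pthread_mutex_unlock(&lock);
	return rc;
}

static void *putter(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	shortterm_users--;			/* the other holder drops */
	pthread_cond_broadcast(&destroy_wait);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	shortterm_users = 2;			/* destroyer + one ioctl */
	pthread_create(&t, NULL, putter, NULL);
	int rc = dec_wait_shortterm(10);
	pthread_join(t, NULL);
	return rc;
}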
+
/*
* Remove the given object id from the xarray if the only reference to the
- * object is held by the xarray. The caller must call ops destroy().
+ * object is held by the xarray.
*/
-static struct iommufd_object *iommufd_object_remove(struct iommufd_ctx *ictx,
- u32 id, bool extra_put)
+int iommufd_object_remove(struct iommufd_ctx *ictx,
+ struct iommufd_object *to_destroy, u32 id,
+ unsigned int flags)
{
struct iommufd_object *obj;
XA_STATE(xas, &ictx->objects, id);
-
- xa_lock(&ictx->objects);
- obj = xas_load(&xas);
- if (xa_is_zero(obj) || !obj) {
- obj = ERR_PTR(-ENOENT);
- goto out_xa;
- }
+ bool zerod_shortterm = false;
+ int ret;
/*
- * If the caller is holding a ref on obj we put it here under the
- * spinlock.
+ * The purpose of the shortterm_users is to ensure deterministic
+ * destruction of objects used by external drivers and destroyed by this
+ * function. Any temporary increment of the refcount must increment
+ * shortterm_users, such as during ioctl execution.
*/
- if (extra_put)
+ if (flags & REMOVE_WAIT_SHORTTERM) {
+ ret = iommufd_object_dec_wait_shortterm(ictx, to_destroy);
+ if (ret) {
+ /*
+ * We have a bug. Put back the callers reference and
+ * defer cleaning this object until close.
+ */
+ refcount_dec(&to_destroy->users);
+ return ret;
+ }
+ zerod_shortterm = true;
+ }
+
+ xa_lock(&ictx->objects);
+ obj = xas_load(&xas);
+ if (to_destroy) {
+ /*
+ * If the caller is holding a ref on obj we put it here under
+ * the spinlock.
+ */
refcount_dec(&obj->users);
+ if (WARN_ON(obj != to_destroy)) {
+ ret = -ENOENT;
+ goto err_xa;
+ }
+ } else if (xa_is_zero(obj) || !obj) {
+ ret = -ENOENT;
+ goto err_xa;
+ }
+
if (!refcount_dec_if_one(&obj->users)) {
- obj = ERR_PTR(-EBUSY);
- goto out_xa;
+ ret = -EBUSY;
+ goto err_xa;
}
xas_store(&xas, NULL);
if (ictx->vfio_ioas == container_of(obj, struct iommufd_ioas, obj))
ictx->vfio_ioas = NULL;
-
-out_xa:
xa_unlock(&ictx->objects);
- /* The returned object reference count is zero */
- return obj;
-}
-
-/*
- * The caller holds a users refcount and wants to destroy the object. Returns
- * true if the object was destroyed. In all cases the caller no longer has a
- * reference on obj.
- */
-void __iommufd_object_destroy_user(struct iommufd_ctx *ictx,
- struct iommufd_object *obj, bool allow_fail)
-{
- struct iommufd_object *ret;
-
/*
- * The purpose of the destroy_rwsem is to ensure deterministic
- * destruction of objects used by external drivers and destroyed by this
- * function. Any temporary increment of the refcount must hold the read
- * side of this, such as during ioctl execution.
- */
- down_write(&obj->destroy_rwsem);
- ret = iommufd_object_remove(ictx, obj->id, true);
- up_write(&obj->destroy_rwsem);
-
- if (allow_fail && IS_ERR(ret))
- return;
-
- /*
- * If there is a bug and we couldn't destroy the object then we did put
- * back the caller's refcount and will eventually try to free it again
- * during close.
+ * Since users is zero, any positive shortterm_users must be racing
+ * iommufd_put_object(), or we have a bug.
*/
- if (WARN_ON(IS_ERR(ret)))
- return;
+ if (!zerod_shortterm) {
+ ret = iommufd_object_dec_wait_shortterm(ictx, obj);
+ if (WARN_ON(ret))
+ return ret;
+ }
iommufd_object_ops[obj->type].destroy(obj);
kfree(obj);
+ return 0;
+
+err_xa:
+ if (zerod_shortterm) {
+ /* Restore the xarray owned reference */
+ refcount_set(&obj->shortterm_users, 1);
+ }
+ xa_unlock(&ictx->objects);
+
+ /* The returned object reference count is zero */
+ return ret;
}
static int iommufd_destroy(struct iommufd_ucmd *ucmd)
{
struct iommu_destroy *cmd = ucmd->cmd;
- struct iommufd_object *obj;
- obj = iommufd_object_remove(ucmd->ictx, cmd->id, false);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
- iommufd_object_ops[obj->type].destroy(obj);
- kfree(obj);
- return 0;
+ return iommufd_object_remove(ucmd->ictx, NULL, cmd->id, 0);
}
static int iommufd_fops_open(struct inode *inode, struct file *filp)
@@ -238,6 +251,7 @@ static int iommufd_fops_open(struct inode *inode, struct file *filp)
xa_init_flags(&ictx->objects, XA_FLAGS_ALLOC1 | XA_FLAGS_ACCOUNT);
xa_init(&ictx->groups);
ictx->file = filp;
+ init_waitqueue_head(&ictx->destroy_wait);
filp->private_data = ictx;
return 0;
}
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index 5d93434003d8..022ef8f55088 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -86,7 +86,7 @@ void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
if (IS_ERR(ioas))
return;
*iova = iommufd_test_syz_conv_iova(&ioas->iopt, iova);
- iommufd_put_object(&ioas->obj);
+ iommufd_put_object(ucmd->ictx, &ioas->obj);
}
struct mock_iommu_domain {
@@ -500,7 +500,7 @@ get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
return hwpt;
if (hwpt->domain->type != IOMMU_DOMAIN_UNMANAGED ||
hwpt->domain->ops != mock_ops.default_domain_ops) {
- iommufd_put_object(&hwpt->obj);
+ iommufd_put_object(ucmd->ictx, &hwpt->obj);
return ERR_PTR(-EINVAL);
}
*mock = container_of(hwpt->domain, struct mock_iommu_domain, domain);
@@ -518,7 +518,7 @@ get_md_pagetable_nested(struct iommufd_ucmd *ucmd, u32 mockpt_id,
return hwpt;
if (hwpt->domain->type != IOMMU_DOMAIN_NESTED ||
hwpt->domain->ops != &domain_nested_ops) {
- iommufd_put_object(&hwpt->obj);
+ iommufd_put_object(ucmd->ictx, &hwpt->obj);
return ERR_PTR(-EINVAL);
}
*mock_nested = container_of(hwpt->domain,
@@ -681,7 +681,7 @@ static int iommufd_test_mock_domain_replace(struct iommufd_ucmd *ucmd,
rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
out_dev_obj:
- iommufd_put_object(dev_obj);
+ iommufd_put_object(ucmd->ictx, dev_obj);
return rc;
}
@@ -699,7 +699,7 @@ static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd,
down_write(&ioas->iopt.iova_rwsem);
rc = iopt_reserve_iova(&ioas->iopt, start, start + length - 1, NULL);
up_write(&ioas->iopt.iova_rwsem);
- iommufd_put_object(&ioas->obj);
+ iommufd_put_object(ucmd->ictx, &ioas->obj);
return rc;
}
@@ -754,7 +754,7 @@ static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
rc = 0;
out_put:
- iommufd_put_object(&hwpt->obj);
+ iommufd_put_object(ucmd->ictx, &hwpt->obj);
return rc;
}
@@ -1233,7 +1233,7 @@ static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id,
out_free:
kvfree(tmp);
out_put:
- iommufd_put_object(&hwpt->obj);
+ iommufd_put_object(ucmd->ictx, &hwpt->obj);
return rc;
}
diff --git a/drivers/iommu/iommufd/vfio_compat.c b/drivers/iommu/iommufd/vfio_compat.c
index 538fbf76354d..a3ad5f0b6c59 100644
--- a/drivers/iommu/iommufd/vfio_compat.c
+++ b/drivers/iommu/iommufd/vfio_compat.c
@@ -41,7 +41,7 @@ int iommufd_vfio_compat_ioas_get_id(struct iommufd_ctx *ictx, u32 *out_ioas_id)
if (IS_ERR(ioas))
return PTR_ERR(ioas);
*out_ioas_id = ioas->obj.id;
- iommufd_put_object(&ioas->obj);
+ iommufd_put_object(ictx, &ioas->obj);
return 0;
}
EXPORT_SYMBOL_NS_GPL(iommufd_vfio_compat_ioas_get_id, IOMMUFD_VFIO);
@@ -98,7 +98,7 @@ int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx)
if (ictx->vfio_ioas && iommufd_lock_obj(&ictx->vfio_ioas->obj)) {
ret = 0;
- iommufd_put_object(&ictx->vfio_ioas->obj);
+ iommufd_put_object(ictx, &ictx->vfio_ioas->obj);
goto out_abort;
}
ictx->vfio_ioas = ioas;
@@ -133,7 +133,7 @@ int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd)
if (IS_ERR(ioas))
return PTR_ERR(ioas);
cmd->ioas_id = ioas->obj.id;
- iommufd_put_object(&ioas->obj);
+ iommufd_put_object(ucmd->ictx, &ioas->obj);
return iommufd_ucmd_respond(ucmd, sizeof(*cmd));
case IOMMU_VFIO_IOAS_SET:
@@ -143,7 +143,7 @@ int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd)
xa_lock(&ucmd->ictx->objects);
ucmd->ictx->vfio_ioas = ioas;
xa_unlock(&ucmd->ictx->objects);
- iommufd_put_object(&ioas->obj);
+ iommufd_put_object(ucmd->ictx, &ioas->obj);
return 0;
case IOMMU_VFIO_IOAS_CLEAR:
@@ -190,7 +190,7 @@ static int iommufd_vfio_map_dma(struct iommufd_ctx *ictx, unsigned int cmd,
iova = map.iova;
rc = iopt_map_user_pages(ictx, &ioas->iopt, &iova, u64_to_user_ptr(map.vaddr),
map.size, iommu_prot, 0);
- iommufd_put_object(&ioas->obj);
+ iommufd_put_object(ictx, &ioas->obj);
return rc;
}
@@ -249,7 +249,7 @@ static int iommufd_vfio_unmap_dma(struct iommufd_ctx *ictx, unsigned int cmd,
rc = -EFAULT;
err_put:
- iommufd_put_object(&ioas->obj);
+ iommufd_put_object(ictx, &ioas->obj);
return rc;
}
@@ -272,7 +272,7 @@ static int iommufd_vfio_cc_iommu(struct iommufd_ctx *ictx)
}
mutex_unlock(&ioas->mutex);
- iommufd_put_object(&ioas->obj);
+ iommufd_put_object(ictx, &ioas->obj);
return rc;
}
@@ -349,7 +349,7 @@ static int iommufd_vfio_set_iommu(struct iommufd_ctx *ictx, unsigned long type)
*/
if (type == VFIO_TYPE1_IOMMU)
rc = iopt_disable_large_pages(&ioas->iopt);
- iommufd_put_object(&ioas->obj);
+ iommufd_put_object(ictx, &ioas->obj);
return rc;
}
@@ -511,7 +511,7 @@ static int iommufd_vfio_iommu_get_info(struct iommufd_ctx *ictx,
out_put:
up_read(&ioas->iopt.iova_rwsem);
- iommufd_put_object(&ioas->obj);
+ iommufd_put_object(ictx, &ioas->obj);
return rc;
}
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 157b286e36bf..35ba090f3b5e 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -112,16 +112,20 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
const u32 *id)
{
const struct iommu_ops *ops = NULL;
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct iommu_fwspec *fwspec;
int err = NO_IOMMU;
if (!master_np)
return NULL;
+ /* Serialise to make dev->iommu stable while we may populate its fwspec */
+ mutex_lock(&iommu_probe_device_lock);
+ fwspec = dev_iommu_fwspec_get(dev);
if (fwspec) {
- if (fwspec->ops)
+ if (fwspec->ops) {
+ mutex_unlock(&iommu_probe_device_lock);
return fwspec->ops;
-
+ }
/* In the deferred case, start again from scratch */
iommu_fwspec_free(dev);
}
@@ -155,6 +159,8 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
fwspec = dev_iommu_fwspec_get(dev);
ops = fwspec->ops;
}
+ mutex_unlock(&iommu_probe_device_lock);
+
/*
* If we have reason to believe the IOMMU driver missed the initial
* probe for dev, replay it to get things in order.
@@ -191,7 +197,7 @@ iommu_resv_region_get_type(struct device *dev,
if (start == phys->start && end == phys->end)
return IOMMU_RESV_DIRECT;
- dev_warn(dev, "treating non-direct mapping [%pr] -> [%pap-%pap] as reservation\n", &phys,
+ dev_warn(dev, "treating non-direct mapping [%pr] -> [%pap-%pap] as reservation\n", phys,
&start, &end);
return IOMMU_RESV_RESERVED;
}
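/*
 * The of_iommu change above is the classic "read and, if absent,
 * populate under one lock" pattern: dev->iommu (the fwspec) is only
 * stable while iommu_probe_device_lock is held, so the lookup and the
 * creation must both happen inside it. Compact userspace sketch of
 * that shape (names are illustrative, not the kernel API):
 */
#include <pthread.h>
#include <stddef.h>

struct ops { int dummy; };

static pthread_mutex_t probe_lock = PTHREAD_MUTEX_INITIALIZER;
static const struct ops *dev_ops;	/* stands in for dev->iommu state */

static const struct ops *parse_and_probe(void)
{
	static const struct ops real_ops = { 1 };	/* placeholder */
	return &real_ops;
}

static const struct ops *configure(void)
{
	const struct ops *ops;

	pthread_mutex_lock(&probe_lock);
	ops = dev_ops;			/* only stable under the lock */
	if (!ops)
		dev_ops = ops = parse_and_probe();
	pthread_mutex_unlock(&probe_lock);
	return ops;
}

int main(void)
{
	return configure() == configure() ? 0 : 1;
}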
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 974b84f6bd6a..ba1be15cfd8e 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -75,19 +75,6 @@ static ssize_t max_brightness_show(struct device *dev,
}
static DEVICE_ATTR_RO(max_brightness);
-static ssize_t color_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- const char *color_text = "invalid";
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
-
- if (led_cdev->color < LED_COLOR_ID_MAX)
- color_text = led_colors[led_cdev->color];
-
- return sysfs_emit(buf, "%s\n", color_text);
-}
-static DEVICE_ATTR_RO(color);
-
#ifdef CONFIG_LEDS_TRIGGERS
static BIN_ATTR(trigger, 0644, led_trigger_read, led_trigger_write, 0);
static struct bin_attribute *led_trigger_bin_attrs[] = {
@@ -102,7 +89,6 @@ static const struct attribute_group led_trigger_group = {
static struct attribute *led_class_attrs[] = {
&dev_attr_brightness.attr,
&dev_attr_max_brightness.attr,
- &dev_attr_color.attr,
NULL,
};
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index e358e77e4b38..d76214fa9ad8 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -226,6 +226,11 @@ static int set_device_name(struct led_netdev_data *trigger_data,
cancel_delayed_work_sync(&trigger_data->work);
+ /*
+ * Take RTNL lock before trigger_data lock to prevent potential
+ * deadlock with netdev notifier registration.
+ */
+ rtnl_lock();
mutex_lock(&trigger_data->lock);
if (trigger_data->net_dev) {
@@ -245,16 +250,14 @@ static int set_device_name(struct led_netdev_data *trigger_data,
trigger_data->carrier_link_up = false;
trigger_data->link_speed = SPEED_UNKNOWN;
trigger_data->duplex = DUPLEX_UNKNOWN;
- if (trigger_data->net_dev != NULL) {
- rtnl_lock();
+ if (trigger_data->net_dev)
get_device_state(trigger_data);
- rtnl_unlock();
- }
trigger_data->last_activity = 0;
set_baseline_state(trigger_data);
mutex_unlock(&trigger_data->lock);
+ rtnl_unlock();
return 0;
}
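/*
 * The ledtrig-netdev fix above establishes a lock-ordering rule: every
 * path that needs both locks must take RTNL (outer) before
 * trigger_data->lock (inner), never the reverse. Minimal sketch of the
 * rule (illustrative, not kernel code):
 */
#include <pthread.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;    /* outer */
static pthread_mutex_t trigger = PTHREAD_MUTEX_INITIALIZER; /* inner */

static void set_device_name_path(void)
{
	pthread_mutex_lock(&rtnl);	/* always outer first */
	pthread_mutex_lock(&trigger);
	/* ... update trigger state, query the device under rtnl ... */
	pthread_mutex_unlock(&trigger);
	pthread_mutex_unlock(&rtnl);
}

static void notifier_path(void)
{
	/* The netdev notifier already runs under rtnl, so it may only
	 * take the inner lock; taking rtnl inside trigger would invert
	 * the order and allow an AB/BA deadlock. */
	pthread_mutex_lock(&trigger);
	/* ... react to the netdev event ... */
	pthread_mutex_unlock(&trigger);
}

int main(void)
{
	set_device_name_path();
	notifier_path();
	return 0;
}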
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index de3019972b35..196cdacce38f 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -293,16 +293,16 @@ static void btree_complete_write(struct btree *b, struct btree_write *w)
w->journal = NULL;
}
-static void btree_node_write_unlock(struct closure *cl)
+static CLOSURE_CALLBACK(btree_node_write_unlock)
{
- struct btree *b = container_of(cl, struct btree, io);
+ closure_type(b, struct btree, io);
up(&b->io_mutex);
}
-static void __btree_node_write_done(struct closure *cl)
+static CLOSURE_CALLBACK(__btree_node_write_done)
{
- struct btree *b = container_of(cl, struct btree, io);
+ closure_type(b, struct btree, io);
struct btree_write *w = btree_prev_write(b);
bch_bbio_free(b->bio, b->c);
@@ -315,12 +315,12 @@ static void __btree_node_write_done(struct closure *cl)
closure_return_with_destructor(cl, btree_node_write_unlock);
}
-static void btree_node_write_done(struct closure *cl)
+static CLOSURE_CALLBACK(btree_node_write_done)
{
- struct btree *b = container_of(cl, struct btree, io);
+ closure_type(b, struct btree, io);
bio_free_pages(b->bio);
- __btree_node_write_done(cl);
+ __btree_node_write_done(&cl->work);
}
static void btree_node_write_endio(struct bio *bio)
@@ -1522,7 +1522,7 @@ out_nocoalesce:
bch_keylist_free(&keylist);
for (i = 0; i < nodes; i++)
- if (!IS_ERR(new_nodes[i])) {
+ if (!IS_ERR_OR_NULL(new_nodes[i])) {
btree_node_free(new_nodes[i]);
rw_unlock(true, new_nodes[i]);
}
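/*
 * Every bcache hunk in this series is the same mechanical conversion.
 * Per the closure rework it builds on, the macros expand roughly like
 * this (a sketch of the idea, not the exact kernel definitions):
 *
 *	#define CLOSURE_CALLBACK(name) void name(struct work_struct *ws)
 *	#define closure_type(name, type, member)                            \
 *		struct closure *cl = container_of(ws, struct closure, work); \
 *		type *name = container_of(cl, type, member)
 *
 * which is why direct invocations now pass &cl->work. Self-contained
 * demo of that double container_of():
 */
#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };
struct closure { struct work_struct work; };
struct btree_like { int level; struct closure io; };

static void callback(struct work_struct *ws)
{
	struct closure *cl = container_of(ws, struct closure, work);
	struct btree_like *b = container_of(cl, struct btree_like, io);

	assert(b->level == 42);		/* recovered the outer object */
}

int main(void)
{
	struct btree_like b = { .level = 42 };

	callback(&b.io.work);		/* mirrors calling foo(&cl->work) */
	return 0;
}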
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index c182c21de2e8..7ff14bd2feb8 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -723,11 +723,11 @@ static void journal_write_endio(struct bio *bio)
closure_put(&w->c->journal.io);
}
-static void journal_write(struct closure *cl);
+static CLOSURE_CALLBACK(journal_write);
-static void journal_write_done(struct closure *cl)
+static CLOSURE_CALLBACK(journal_write_done)
{
- struct journal *j = container_of(cl, struct journal, io);
+ closure_type(j, struct journal, io);
struct journal_write *w = (j->cur == j->w)
? &j->w[1]
: &j->w[0];
@@ -736,19 +736,19 @@ static void journal_write_done(struct closure *cl)
continue_at_nobarrier(cl, journal_write, bch_journal_wq);
}
-static void journal_write_unlock(struct closure *cl)
+static CLOSURE_CALLBACK(journal_write_unlock)
__releases(&c->journal.lock)
{
- struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+ closure_type(c, struct cache_set, journal.io);
c->journal.io_in_flight = 0;
spin_unlock(&c->journal.lock);
}
-static void journal_write_unlocked(struct closure *cl)
+static CLOSURE_CALLBACK(journal_write_unlocked)
__releases(c->journal.lock)
{
- struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+ closure_type(c, struct cache_set, journal.io);
struct cache *ca = c->cache;
struct journal_write *w = c->journal.cur;
struct bkey *k = &c->journal.key;
@@ -823,12 +823,12 @@ static void journal_write_unlocked(struct closure *cl)
continue_at(cl, journal_write_done, NULL);
}
-static void journal_write(struct closure *cl)
+static CLOSURE_CALLBACK(journal_write)
{
- struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+ closure_type(c, struct cache_set, journal.io);
spin_lock(&c->journal.lock);
- journal_write_unlocked(cl);
+ journal_write_unlocked(&cl->work);
}
static void journal_try_write(struct cache_set *c)
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 9f32901fdad1..ebd500bdf0b2 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -35,16 +35,16 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
/* Moving GC - IO loop */
-static void moving_io_destructor(struct closure *cl)
+static CLOSURE_CALLBACK(moving_io_destructor)
{
- struct moving_io *io = container_of(cl, struct moving_io, cl);
+ closure_type(io, struct moving_io, cl);
kfree(io);
}
-static void write_moving_finish(struct closure *cl)
+static CLOSURE_CALLBACK(write_moving_finish)
{
- struct moving_io *io = container_of(cl, struct moving_io, cl);
+ closure_type(io, struct moving_io, cl);
struct bio *bio = &io->bio.bio;
bio_free_pages(bio);
@@ -89,9 +89,9 @@ static void moving_init(struct moving_io *io)
bch_bio_map(bio, NULL);
}
-static void write_moving(struct closure *cl)
+static CLOSURE_CALLBACK(write_moving)
{
- struct moving_io *io = container_of(cl, struct moving_io, cl);
+ closure_type(io, struct moving_io, cl);
struct data_insert_op *op = &io->op;
if (!op->status) {
@@ -113,9 +113,9 @@ static void write_moving(struct closure *cl)
continue_at(cl, write_moving_finish, op->wq);
}
-static void read_moving_submit(struct closure *cl)
+static CLOSURE_CALLBACK(read_moving_submit)
{
- struct moving_io *io = container_of(cl, struct moving_io, cl);
+ closure_type(io, struct moving_io, cl);
struct bio *bio = &io->bio.bio;
bch_submit_bbio(bio, io->op.c, &io->w->key, 0);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index a9b1f3896249..83d112bd2b1c 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -25,7 +25,7 @@
struct kmem_cache *bch_search_cache;
-static void bch_data_insert_start(struct closure *cl);
+static CLOSURE_CALLBACK(bch_data_insert_start);
static unsigned int cache_mode(struct cached_dev *dc)
{
@@ -55,9 +55,9 @@ static void bio_csum(struct bio *bio, struct bkey *k)
/* Insert data into cache */
-static void bch_data_insert_keys(struct closure *cl)
+static CLOSURE_CALLBACK(bch_data_insert_keys)
{
- struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+ closure_type(op, struct data_insert_op, cl);
atomic_t *journal_ref = NULL;
struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
int ret;
@@ -136,9 +136,9 @@ out:
continue_at(cl, bch_data_insert_keys, op->wq);
}
-static void bch_data_insert_error(struct closure *cl)
+static CLOSURE_CALLBACK(bch_data_insert_error)
{
- struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+ closure_type(op, struct data_insert_op, cl);
/*
* Our data write just errored, which means we've got a bunch of keys to
@@ -163,7 +163,7 @@ static void bch_data_insert_error(struct closure *cl)
op->insert_keys.top = dst;
- bch_data_insert_keys(cl);
+ bch_data_insert_keys(&cl->work);
}
static void bch_data_insert_endio(struct bio *bio)
@@ -184,9 +184,9 @@ static void bch_data_insert_endio(struct bio *bio)
bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
}
-static void bch_data_insert_start(struct closure *cl)
+static CLOSURE_CALLBACK(bch_data_insert_start)
{
- struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+ closure_type(op, struct data_insert_op, cl);
struct bio *bio = op->bio, *n;
if (op->bypass)
@@ -305,16 +305,16 @@ err:
* If op->bypass is true, instead of inserting the data it invalidates the
* region of the cache represented by op->bio and op->inode.
*/
-void bch_data_insert(struct closure *cl)
+CLOSURE_CALLBACK(bch_data_insert)
{
- struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+ closure_type(op, struct data_insert_op, cl);
trace_bcache_write(op->c, op->inode, op->bio,
op->writeback, op->bypass);
bch_keylist_init(&op->insert_keys);
bio_get(op->bio);
- bch_data_insert_start(cl);
+ bch_data_insert_start(&cl->work);
}
/*
@@ -575,9 +575,9 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
return n == bio ? MAP_DONE : MAP_CONTINUE;
}
-static void cache_lookup(struct closure *cl)
+static CLOSURE_CALLBACK(cache_lookup)
{
- struct search *s = container_of(cl, struct search, iop.cl);
+ closure_type(s, struct search, iop.cl);
struct bio *bio = &s->bio.bio;
struct cached_dev *dc;
int ret;
@@ -698,9 +698,9 @@ static void do_bio_hook(struct search *s,
bio_cnt_set(bio, 3);
}
-static void search_free(struct closure *cl)
+static CLOSURE_CALLBACK(search_free)
{
- struct search *s = container_of(cl, struct search, cl);
+ closure_type(s, struct search, cl);
atomic_dec(&s->iop.c->search_inflight);
@@ -749,20 +749,20 @@ static inline struct search *search_alloc(struct bio *bio,
/* Cached devices */
-static void cached_dev_bio_complete(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_bio_complete)
{
- struct search *s = container_of(cl, struct search, cl);
+ closure_type(s, struct search, cl);
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
cached_dev_put(dc);
- search_free(cl);
+ search_free(&cl->work);
}
/* Process reads */
-static void cached_dev_read_error_done(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_read_error_done)
{
- struct search *s = container_of(cl, struct search, cl);
+ closure_type(s, struct search, cl);
if (s->iop.replace_collision)
bch_mark_cache_miss_collision(s->iop.c, s->d);
@@ -770,12 +770,12 @@ static void cached_dev_read_error_done(struct closure *cl)
if (s->iop.bio)
bio_free_pages(s->iop.bio);
- cached_dev_bio_complete(cl);
+ cached_dev_bio_complete(&cl->work);
}
-static void cached_dev_read_error(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_read_error)
{
- struct search *s = container_of(cl, struct search, cl);
+ closure_type(s, struct search, cl);
struct bio *bio = &s->bio.bio;
/*
@@ -801,9 +801,9 @@ static void cached_dev_read_error(struct closure *cl)
continue_at(cl, cached_dev_read_error_done, NULL);
}
-static void cached_dev_cache_miss_done(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_cache_miss_done)
{
- struct search *s = container_of(cl, struct search, cl);
+ closure_type(s, struct search, cl);
struct bcache_device *d = s->d;
if (s->iop.replace_collision)
@@ -812,13 +812,13 @@ static void cached_dev_cache_miss_done(struct closure *cl)
if (s->iop.bio)
bio_free_pages(s->iop.bio);
- cached_dev_bio_complete(cl);
+ cached_dev_bio_complete(&cl->work);
closure_put(&d->cl);
}
-static void cached_dev_read_done(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_read_done)
{
- struct search *s = container_of(cl, struct search, cl);
+ closure_type(s, struct search, cl);
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
/*
@@ -858,9 +858,9 @@ static void cached_dev_read_done(struct closure *cl)
continue_at(cl, cached_dev_cache_miss_done, NULL);
}
-static void cached_dev_read_done_bh(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_read_done_bh)
{
- struct search *s = container_of(cl, struct search, cl);
+ closure_type(s, struct search, cl);
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
bch_mark_cache_accounting(s->iop.c, s->d,
@@ -955,13 +955,13 @@ static void cached_dev_read(struct cached_dev *dc, struct search *s)
/* Process writes */
-static void cached_dev_write_complete(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_write_complete)
{
- struct search *s = container_of(cl, struct search, cl);
+ closure_type(s, struct search, cl);
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
up_read_non_owner(&dc->writeback_lock);
- cached_dev_bio_complete(cl);
+ cached_dev_bio_complete(&cl->work);
}
static void cached_dev_write(struct cached_dev *dc, struct search *s)
@@ -1048,9 +1048,9 @@ insert_data:
continue_at(cl, cached_dev_write_complete, NULL);
}
-static void cached_dev_nodata(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_nodata)
{
- struct search *s = container_of(cl, struct search, cl);
+ closure_type(s, struct search, cl);
struct bio *bio = &s->bio.bio;
if (s->iop.flush_journal)
@@ -1265,9 +1265,9 @@ static int flash_dev_cache_miss(struct btree *b, struct search *s,
return MAP_CONTINUE;
}
-static void flash_dev_nodata(struct closure *cl)
+static CLOSURE_CALLBACK(flash_dev_nodata)
{
- struct search *s = container_of(cl, struct search, cl);
+ closure_type(s, struct search, cl);
if (s->iop.flush_journal)
bch_journal_meta(s->iop.c, cl);
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 38ab4856eaab..46bbef00aebb 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -34,7 +34,7 @@ struct data_insert_op {
};
unsigned int bch_get_congested(const struct cache_set *c);
-void bch_data_insert(struct closure *cl);
+CLOSURE_CALLBACK(bch_data_insert);
void bch_cached_dev_request_init(struct cached_dev *dc);
void cached_dev_submit_bio(struct bio *bio);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index bfe1685dbae5..1402096b8076 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -327,9 +327,9 @@ static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
submit_bio(bio);
}
-static void bch_write_bdev_super_unlock(struct closure *cl)
+static CLOSURE_CALLBACK(bch_write_bdev_super_unlock)
{
- struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);
+ closure_type(dc, struct cached_dev, sb_write);
up(&dc->sb_write_mutex);
}
@@ -363,9 +363,9 @@ static void write_super_endio(struct bio *bio)
closure_put(&ca->set->sb_write);
}
-static void bcache_write_super_unlock(struct closure *cl)
+static CLOSURE_CALLBACK(bcache_write_super_unlock)
{
- struct cache_set *c = container_of(cl, struct cache_set, sb_write);
+ closure_type(c, struct cache_set, sb_write);
up(&c->sb_write_mutex);
}
@@ -407,9 +407,9 @@ static void uuid_endio(struct bio *bio)
closure_put(cl);
}
-static void uuid_io_unlock(struct closure *cl)
+static CLOSURE_CALLBACK(uuid_io_unlock)
{
- struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
+ closure_type(c, struct cache_set, uuid_write);
up(&c->uuid_write_mutex);
}
@@ -1344,9 +1344,9 @@ void bch_cached_dev_release(struct kobject *kobj)
module_put(THIS_MODULE);
}
-static void cached_dev_free(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_free)
{
- struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
+ closure_type(dc, struct cached_dev, disk.cl);
if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
cancel_writeback_rate_update_dwork(dc);
@@ -1378,9 +1378,9 @@ static void cached_dev_free(struct closure *cl)
kobject_put(&dc->disk.kobj);
}
-static void cached_dev_flush(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_flush)
{
- struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
+ closure_type(dc, struct cached_dev, disk.cl);
struct bcache_device *d = &dc->disk;
mutex_lock(&bch_register_lock);
@@ -1499,9 +1499,9 @@ void bch_flash_dev_release(struct kobject *kobj)
kfree(d);
}
-static void flash_dev_free(struct closure *cl)
+static CLOSURE_CALLBACK(flash_dev_free)
{
- struct bcache_device *d = container_of(cl, struct bcache_device, cl);
+ closure_type(d, struct bcache_device, cl);
mutex_lock(&bch_register_lock);
atomic_long_sub(bcache_dev_sectors_dirty(d),
@@ -1512,9 +1512,9 @@ static void flash_dev_free(struct closure *cl)
kobject_put(&d->kobj);
}
-static void flash_dev_flush(struct closure *cl)
+static CLOSURE_CALLBACK(flash_dev_flush)
{
- struct bcache_device *d = container_of(cl, struct bcache_device, cl);
+ closure_type(d, struct bcache_device, cl);
mutex_lock(&bch_register_lock);
bcache_device_unlink(d);
@@ -1670,9 +1670,9 @@ void bch_cache_set_release(struct kobject *kobj)
module_put(THIS_MODULE);
}
-static void cache_set_free(struct closure *cl)
+static CLOSURE_CALLBACK(cache_set_free)
{
- struct cache_set *c = container_of(cl, struct cache_set, cl);
+ closure_type(c, struct cache_set, cl);
struct cache *ca;
debugfs_remove(c->debug);
@@ -1711,9 +1711,9 @@ static void cache_set_free(struct closure *cl)
kobject_put(&c->kobj);
}
-static void cache_set_flush(struct closure *cl)
+static CLOSURE_CALLBACK(cache_set_flush)
{
- struct cache_set *c = container_of(cl, struct cache_set, caching);
+ closure_type(c, struct cache_set, caching);
struct cache *ca = c->cache;
struct btree *b;
@@ -1808,9 +1808,9 @@ static void conditional_stop_bcache_device(struct cache_set *c,
}
}
-static void __cache_set_unregister(struct closure *cl)
+static CLOSURE_CALLBACK(__cache_set_unregister)
{
- struct cache_set *c = container_of(cl, struct cache_set, caching);
+ closure_type(c, struct cache_set, caching);
struct cached_dev *dc;
struct bcache_device *d;
size_t i;
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 3accfdaee6b1..8827a6f130ad 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -341,16 +341,16 @@ static void dirty_init(struct keybuf_key *w)
bch_bio_map(bio, NULL);
}
-static void dirty_io_destructor(struct closure *cl)
+static CLOSURE_CALLBACK(dirty_io_destructor)
{
- struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+ closure_type(io, struct dirty_io, cl);
kfree(io);
}
-static void write_dirty_finish(struct closure *cl)
+static CLOSURE_CALLBACK(write_dirty_finish)
{
- struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+ closure_type(io, struct dirty_io, cl);
struct keybuf_key *w = io->bio.bi_private;
struct cached_dev *dc = io->dc;
@@ -400,9 +400,9 @@ static void dirty_endio(struct bio *bio)
closure_put(&io->cl);
}
-static void write_dirty(struct closure *cl)
+static CLOSURE_CALLBACK(write_dirty)
{
- struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+ closure_type(io, struct dirty_io, cl);
struct keybuf_key *w = io->bio.bi_private;
struct cached_dev *dc = io->dc;
@@ -462,9 +462,9 @@ static void read_dirty_endio(struct bio *bio)
dirty_endio(bio);
}
-static void read_dirty_submit(struct closure *cl)
+static CLOSURE_CALLBACK(read_dirty_submit)
{
- struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+ closure_type(io, struct dirty_io, cl);
closure_bio_submit(io->dc->disk.c, &io->bio, cl);
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 120153e44ae0..f57fb821528d 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -434,7 +434,7 @@ static struct bio *clone_bio(struct dm_target *ti, struct flakey_c *fc, struct b
remaining_size = size;
- order = MAX_ORDER - 1;
+ order = MAX_ORDER;
while (remaining_size) {
struct page *pages;
unsigned size_to_add, to_copy;
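/*
 * Context for the one-liner above: MAX_ORDER was redefined treewide to
 * be inclusive (the largest allocatable order), so the splitting loop
 * may now start at MAX_ORDER rather than MAX_ORDER - 1. Userspace
 * sketch of that split (PAGE_SIZE and MAX_ORDER values illustrative):
 */
#include <stdio.h>

#define PAGE_SIZE 4096u
#define MAX_ORDER 10		/* inclusive: blocks of 1 << 10 pages max */

int main(void)
{
	unsigned int remaining = 5 * PAGE_SIZE + 100;
	unsigned int order = MAX_ORDER;

	while (remaining) {
		while (order && (PAGE_SIZE << order) > remaining)
			order--;	/* largest block that still fits */
		unsigned int chunk = PAGE_SIZE << order;

		if (chunk > remaining)
			chunk = remaining;	/* order-0 tail */
		printf("allocate order %u (%u bytes)\n", order, chunk);
		remaining -= chunk;
	}
	return 0;
}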
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 2099c755119e..b475200d8586 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -24,7 +24,8 @@ bool verity_fec_is_enabled(struct dm_verity *v)
*/
static inline struct dm_verity_fec_io *fec_io(struct dm_verity_io *io)
{
- return (struct dm_verity_fec_io *) verity_io_digest_end(io->v, io);
+ return (struct dm_verity_fec_io *)
+ ((char *)io + io->v->ti->per_io_data_size - sizeof(struct dm_verity_fec_io));
}
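/*
 * fec_io() above now locates the FEC state directly at the tail of the
 * per-I/O allocation instead of going through the removed
 * verity_io_digest_end() helper. The layout idea, as a self-contained
 * sketch (struct names and sizes illustrative):
 */
#include <assert.h>
#include <stdlib.h>

struct io_hdr { int id; };	/* stands in for dm_verity_io */
struct fec_state { int errs; };	/* stands in for dm_verity_fec_io */

int main(void)
{
	size_t per_io_data_size = sizeof(struct io_hdr) + 64 +
				  sizeof(struct fec_state);
	char *io = calloc(1, per_io_data_size);

	/* The footer lives at the very end of the per-I/O blob: */
	struct fec_state *fec = (struct fec_state *)
		(io + per_io_data_size - sizeof(struct fec_state));

	fec->errs = 0;
	assert((char *)(fec + 1) == io + per_io_data_size);
	free(io);
	return 0;
}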
/*
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index e115fcfe723c..14e58ae70521 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -642,7 +642,6 @@ static void verity_work(struct work_struct *w)
io->in_tasklet = false;
- verity_fec_init_io(io);
verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
}
@@ -668,7 +667,9 @@ static void verity_end_io(struct bio *bio)
struct dm_verity_io *io = bio->bi_private;
if (bio->bi_status &&
- (!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) {
+ (!verity_fec_is_enabled(io->v) ||
+ verity_is_system_shutting_down() ||
+ (bio->bi_opf & REQ_RAHEAD))) {
verity_finish_io(io, bio->bi_status);
return;
}
@@ -792,6 +793,8 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
bio->bi_private = io;
io->iter = bio->bi_iter;
+ verity_fec_init_io(io);
+
verity_submit_prefetch(v, io);
submit_bio_noacct(bio);
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index f96f4e281ee4..f9d522c870e6 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -115,12 +115,6 @@ static inline u8 *verity_io_want_digest(struct dm_verity *v,
return (u8 *)(io + 1) + v->ahash_reqsize + v->digest_size;
}
-static inline u8 *verity_io_digest_end(struct dm_verity *v,
- struct dm_verity_io *io)
-{
- return verity_io_want_digest(v, io) + v->digest_size;
-}
-
extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
struct bvec_iter *iter,
int (*process)(struct dm_verity *v,
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c94373d64f2c..b066abbffd10 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -490,7 +490,7 @@ int mddev_suspend(struct mddev *mddev, bool interruptible)
}
EXPORT_SYMBOL_GPL(mddev_suspend);
-void mddev_resume(struct mddev *mddev)
+static void __mddev_resume(struct mddev *mddev, bool recovery_needed)
{
lockdep_assert_not_held(&mddev->reconfig_mutex);
@@ -507,12 +507,18 @@ void mddev_resume(struct mddev *mddev)
percpu_ref_resurrect(&mddev->active_io);
wake_up(&mddev->sb_wait);
- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ if (recovery_needed)
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
mutex_unlock(&mddev->suspend_mutex);
}
+
+void mddev_resume(struct mddev *mddev)
+{
+ return __mddev_resume(mddev, true);
+}
EXPORT_SYMBOL_GPL(mddev_resume);
/*
@@ -4840,25 +4846,29 @@ action_show(struct mddev *mddev, char *page)
return sprintf(page, "%s\n", type);
}
-static void stop_sync_thread(struct mddev *mddev)
+/**
+ * stop_sync_thread() - wait for sync_thread to stop if it's running.
+ * @mddev: the array.
+ * @locked: if set, reconfig_mutex will still be held after this function
+ * returns; if not set, reconfig_mutex will be released after this
+ * function returns.
+ * @check_seq: if set, only wait for the currently running sync_thread to
+ * stop; note that a new sync_thread can still start.
+ */
+static void stop_sync_thread(struct mddev *mddev, bool locked, bool check_seq)
{
- if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
- return;
+ int sync_seq;
- if (mddev_lock(mddev))
- return;
+ if (check_seq)
+ sync_seq = atomic_read(&mddev->sync_seq);
- /*
- * Check again in case MD_RECOVERY_RUNNING is cleared before lock is
- * held.
- */
if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
- mddev_unlock(mddev);
+ if (!locked)
+ mddev_unlock(mddev);
return;
}
- if (work_pending(&mddev->del_work))
- flush_workqueue(md_misc_wq);
+ mddev_unlock(mddev);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
/*
@@ -4866,21 +4876,28 @@ static void stop_sync_thread(struct mddev *mddev)
* never happen
*/
md_wakeup_thread_directly(mddev->sync_thread);
+ if (work_pending(&mddev->sync_work))
+ flush_work(&mddev->sync_work);
- mddev_unlock(mddev);
+ wait_event(resync_wait,
+ !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+ (check_seq && sync_seq != atomic_read(&mddev->sync_seq)));
+
+ if (locked)
+ mddev_lock_nointr(mddev);
}
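/*
 * The @check_seq handling above amounts to: snapshot a generation
 * counter, ask the thread to stop, then wait until either nothing is
 * running or the generation has moved on (meaning the thread we saw has
 * finished, even if a new one already started). Condition-variable
 * sketch of that predicate (illustrative, not the md implementation):
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t resync_wait = PTHREAD_COND_INITIALIZER;
static bool running = true;
static int sync_seq;

static void stop_sync_thread(bool check_seq)
{
	pthread_mutex_lock(&lock);
	int seq = sync_seq;		/* snapshot before interrupting */

	/* ... set the interrupt flag and wake the thread here ... */
	while (running && !(check_seq && seq != sync_seq))
		pthread_cond_wait(&resync_wait, &lock);
	pthread_mutex_unlock(&lock);
}

static void *sync_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	running = false;
	sync_seq++;			/* this generation is done */
	pthread_cond_broadcast(&resync_wait);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, sync_thread, NULL);
	stop_sync_thread(true);
	pthread_join(t, NULL);
	return 0;
}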
static void idle_sync_thread(struct mddev *mddev)
{
- int sync_seq = atomic_read(&mddev->sync_seq);
-
mutex_lock(&mddev->sync_mutex);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- stop_sync_thread(mddev);
- wait_event(resync_wait, sync_seq != atomic_read(&mddev->sync_seq) ||
- !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
+ if (mddev_lock(mddev)) {
+ mutex_unlock(&mddev->sync_mutex);
+ return;
+ }
+ stop_sync_thread(mddev, false, true);
mutex_unlock(&mddev->sync_mutex);
}
@@ -4888,11 +4905,13 @@ static void frozen_sync_thread(struct mddev *mddev)
{
mutex_lock(&mddev->sync_mutex);
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- stop_sync_thread(mddev);
- wait_event(resync_wait, mddev->sync_thread == NULL &&
- !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
+ if (mddev_lock(mddev)) {
+ mutex_unlock(&mddev->sync_mutex);
+ return;
+ }
+ stop_sync_thread(mddev, false, false);
mutex_unlock(&mddev->sync_mutex);
}
@@ -6264,14 +6283,7 @@ static void md_clean(struct mddev *mddev)
static void __md_stop_writes(struct mddev *mddev)
{
- set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- if (work_pending(&mddev->del_work))
- flush_workqueue(md_misc_wq);
- if (mddev->sync_thread) {
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_reap_sync_thread(mddev);
- }
-
+ stop_sync_thread(mddev, true, false);
del_timer_sync(&mddev->safemode_timer);
if (mddev->pers && mddev->pers->quiesce) {
@@ -6355,25 +6367,16 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
int err = 0;
int did_freeze = 0;
+ if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
+ return -EBUSY;
+
if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
did_freeze = 1;
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
md_wakeup_thread(mddev->thread);
}
- if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- /*
- * Thread might be blocked waiting for metadata update which will now
- * never happen
- */
- md_wakeup_thread_directly(mddev->sync_thread);
-
- if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
- return -EBUSY;
- mddev_unlock(mddev);
- wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
- &mddev->recovery));
+ stop_sync_thread(mddev, false, false);
wait_event(mddev->sb_wait,
!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
mddev_lock_nointr(mddev);
@@ -6383,29 +6386,30 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
mddev->sync_thread ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
pr_warn("md: %s still in use.\n",mdname(mddev));
- if (did_freeze) {
- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- md_wakeup_thread(mddev->thread);
- }
err = -EBUSY;
goto out;
}
+
if (mddev->pers) {
__md_stop_writes(mddev);
- err = -ENXIO;
- if (mddev->ro == MD_RDONLY)
+ if (mddev->ro == MD_RDONLY) {
+ err = -ENXIO;
goto out;
+ }
+
mddev->ro = MD_RDONLY;
set_disk_ro(mddev->gendisk, 1);
+ }
+
+out:
+ if ((mddev->pers && !err) || did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
sysfs_notify_dirent_safe(mddev->sysfs_state);
- err = 0;
}
-out:
+
mutex_unlock(&mddev->open_mutex);
return err;
}
@@ -6426,20 +6430,8 @@ static int do_md_stop(struct mddev *mddev, int mode,
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
md_wakeup_thread(mddev->thread);
}
- if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-
- /*
- * Thread might be blocked waiting for metadata update which will now
- * never happen
- */
- md_wakeup_thread_directly(mddev->sync_thread);
- mddev_unlock(mddev);
- wait_event(resync_wait, (mddev->sync_thread == NULL &&
- !test_bit(MD_RECOVERY_RUNNING,
- &mddev->recovery)));
- mddev_lock_nointr(mddev);
+ stop_sync_thread(mddev, true, false);
mutex_lock(&mddev->open_mutex);
if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
@@ -9403,7 +9395,15 @@ static void md_start_sync(struct work_struct *ws)
goto not_running;
}
- suspend ? mddev_unlock_and_resume(mddev) : mddev_unlock(mddev);
+ mddev_unlock(mddev);
+ /*
+ * md_start_sync was triggered by MD_RECOVERY_NEEDED, so we should
+ * not set it again. Otherwise, we may cause an issue like this one:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=218200
+ * Therefore, use __mddev_resume(mddev, false).
+ */
+ if (suspend)
+ __mddev_resume(mddev, false);
md_wakeup_thread(mddev->sync_thread);
sysfs_notify_dirent_safe(mddev->sysfs_action);
md_new_event();
@@ -9415,7 +9415,15 @@ not_running:
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
- suspend ? mddev_unlock_and_resume(mddev) : mddev_unlock(mddev);
+ mddev_unlock(mddev);
+ /*
+ * md_start_sync was triggered by MD_RECOVERY_NEEDED, so we should
+ * not set it again. Otherwise, we may cause an issue like this one:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=218200
+ * Therefore, use __mddev_resume(mddev, false).
+ */
+ if (suspend)
+ __mddev_resume(mddev, false);
wake_up(&resync_wait);
if (test_and_clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index dc031d42f53b..26e1e8a5e941 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5892,11 +5892,11 @@ static bool stripe_ahead_of_reshape(struct mddev *mddev, struct r5conf *conf,
int dd_idx;
for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) {
- if (dd_idx == sh->pd_idx)
+ if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
continue;
min_sector = min(min_sector, sh->dev[dd_idx].sector);
- max_sector = min(max_sector, sh->dev[dd_idx].sector);
+ max_sector = max(max_sector, sh->dev[dd_idx].sector);
}
spin_lock_irq(&conf->device_lock);
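/*
 * The raid5 fix above corrects a copy/paste slip: both bounds were
 * computed with min(), so max_sector never tracked the maximum (and the
 * q-parity disk is now skipped like the parity disk). Reduced
 * demonstration of the slip (plain C, not kernel code):
 */
#include <assert.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned long sectors[] = { 80, 16, 400 };
	unsigned long min_s = ~0UL, max_s = 0, buggy_max = ~0UL;

	for (int i = 0; i < 3; i++) {
		min_s = MIN(min_s, sectors[i]);
		max_s = MAX(max_s, sectors[i]);
		buggy_max = MIN(buggy_max, sectors[i]);	/* the old code */
	}
	assert(min_s == 16 && max_s == 400);
	assert(buggy_max == 16);	/* "max" collapsed to the minimum */
	return 0;
}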
diff --git a/drivers/media/pci/ivtv/Kconfig b/drivers/media/pci/ivtv/Kconfig
index 9be52101bc4f..2498f9079b75 100644
--- a/drivers/media/pci/ivtv/Kconfig
+++ b/drivers/media/pci/ivtv/Kconfig
@@ -48,9 +48,7 @@ config VIDEO_IVTV_ALSA
config VIDEO_FB_IVTV
tristate "Conexant cx23415 framebuffer support"
depends on VIDEO_IVTV && FB
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
help
This is a framebuffer driver for the Conexant cx23415 MPEG
encoder/decoder.
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index 23c8c094e791..410477e3e621 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -927,17 +927,17 @@ static int ivtvfb_blank(int blank_mode, struct fb_info *info)
static const struct fb_ops ivtvfb_ops = {
.owner = THIS_MODULE,
+ .fb_read = fb_io_read,
.fb_write = ivtvfb_write,
.fb_check_var = ivtvfb_check_var,
.fb_set_par = ivtvfb_set_par,
.fb_setcolreg = ivtvfb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ __FB_DEFAULT_IOMEM_OPS_DRAW,
.fb_cursor = NULL,
.fb_ioctl = ivtvfb_ioctl,
.fb_pan_display = ivtvfb_pan_display,
.fb_blank = ivtvfb_blank,
+ __FB_DEFAULT_IOMEM_OPS_MMAP,
};
/* Restore hardware after firmware restart */
diff --git a/drivers/media/pci/mgb4/Kconfig b/drivers/media/pci/mgb4/Kconfig
index 13fad15a434c..f2a05a1c8ffa 100644
--- a/drivers/media/pci/mgb4/Kconfig
+++ b/drivers/media/pci/mgb4/Kconfig
@@ -2,6 +2,7 @@
config VIDEO_MGB4
tristate "Digiteq Automotive MGB4 support"
depends on VIDEO_DEV && PCI && I2C && DMADEVICES && SPI && MTD && IIO
+ depends on COMMON_CLK
select VIDEOBUF2_DMA_SG
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
diff --git a/drivers/media/pci/mgb4/mgb4_core.c b/drivers/media/pci/mgb4/mgb4_core.c
index 3efb33fbf40c..5bfb8a06202e 100644
--- a/drivers/media/pci/mgb4/mgb4_core.c
+++ b/drivers/media/pci/mgb4/mgb4_core.c
@@ -42,6 +42,10 @@
#define MGB4_USER_IRQS 16
+#define DIGITEQ_VID 0x1ed8
+#define T100_DID 0x0101
+#define T200_DID 0x0201
+
ATTRIBUTE_GROUPS(mgb4_pci);
static int flashid;
@@ -151,7 +155,7 @@ static struct spi_master *get_spi_adap(struct platform_device *pdev)
return dev ? container_of(dev, struct spi_master, dev) : NULL;
}
-static int init_spi(struct mgb4_dev *mgbdev)
+static int init_spi(struct mgb4_dev *mgbdev, u32 devid)
{
struct resource spi_resources[] = {
{
@@ -213,8 +217,13 @@ static int init_spi(struct mgb4_dev *mgbdev)
snprintf(mgbdev->fw_part_name, sizeof(mgbdev->fw_part_name),
"mgb4-fw.%d", flashid);
mgbdev->partitions[0].name = mgbdev->fw_part_name;
- mgbdev->partitions[0].size = 0x400000;
- mgbdev->partitions[0].offset = 0x400000;
+ if (devid == T200_DID) {
+ mgbdev->partitions[0].size = 0x950000;
+ mgbdev->partitions[0].offset = 0x1000000;
+ } else {
+ mgbdev->partitions[0].size = 0x400000;
+ mgbdev->partitions[0].offset = 0x400000;
+ }
mgbdev->partitions[0].mask_flags = 0;
snprintf(mgbdev->data_part_name, sizeof(mgbdev->data_part_name),
@@ -551,7 +560,7 @@ static int mgb4_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_video_regs;
/* SPI FLASH */
- rv = init_spi(mgbdev);
+ rv = init_spi(mgbdev, id->device);
if (rv < 0)
goto err_cmt_regs;
@@ -666,7 +675,8 @@ static void mgb4_remove(struct pci_dev *pdev)
}
static const struct pci_device_id mgb4_pci_ids[] = {
- { PCI_DEVICE(0x1ed8, 0x0101), },
+ { PCI_DEVICE(DIGITEQ_VID, T100_DID), },
+ { PCI_DEVICE(DIGITEQ_VID, T200_DID), },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, mgb4_pci_ids);
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_pipe.c b/drivers/media/platform/renesas/vsp1/vsp1_pipe.c
index f8093ba9539e..68d05243c3ee 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_pipe.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_pipe.c
@@ -373,7 +373,7 @@ int vsp1_pipeline_stop(struct vsp1_pipeline *pipe)
(7 << VI6_DPR_SMPPT_TGW_SHIFT) |
(VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));
- v4l2_subdev_call(&pipe->output->entity.subdev, video, s_stream, 0);
+ vsp1_wpf_stop(pipe->output);
return ret;
}
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_rpf.c b/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
index 3b17f5fa4067..ea12c3f12c92 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
@@ -44,14 +44,6 @@ static inline void vsp1_rpf_write(struct vsp1_rwpf *rpf,
}
/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Operations
- */
-
-static const struct v4l2_subdev_ops rpf_ops = {
- .pad = &vsp1_rwpf_pad_ops,
-};
-
-/* -----------------------------------------------------------------------------
* VSP1 Entity Operations
*/
@@ -411,7 +403,7 @@ struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index)
rpf->entity.index = index;
sprintf(name, "rpf.%u", index);
- ret = vsp1_entity_init(vsp1, &rpf->entity, name, 2, &rpf_ops,
+ ret = vsp1_entity_init(vsp1, &rpf->entity, name, 2, &vsp1_rwpf_subdev_ops,
MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER);
if (ret < 0)
return ERR_PTR(ret);
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c
index 22a82d218152..e0f87c8103ca 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c
@@ -24,7 +24,7 @@ struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
}
/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Pad Operations
+ * V4L2 Subdevice Operations
*/
static int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
@@ -243,7 +243,7 @@ done:
return ret;
}
-const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops = {
+static const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops = {
.init_cfg = vsp1_entity_init_cfg,
.enum_mbus_code = vsp1_rwpf_enum_mbus_code,
.enum_frame_size = vsp1_rwpf_enum_frame_size,
@@ -253,6 +253,10 @@ const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops = {
.set_selection = vsp1_rwpf_set_selection,
};
+const struct v4l2_subdev_ops vsp1_rwpf_subdev_ops = {
+ .pad = &vsp1_rwpf_pad_ops,
+};
+
/* -----------------------------------------------------------------------------
* Controls
*/
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_rwpf.h b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.h
index eac5c04c2239..e0d212c70b2f 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_rwpf.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.h
@@ -79,9 +79,11 @@ static inline struct vsp1_rwpf *entity_to_rwpf(struct vsp1_entity *entity)
struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index);
struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index);
+void vsp1_wpf_stop(struct vsp1_rwpf *wpf);
+
int vsp1_rwpf_init_ctrls(struct vsp1_rwpf *rwpf, unsigned int ncontrols);
-extern const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops;
+extern const struct v4l2_subdev_ops vsp1_rwpf_subdev_ops;
struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
struct v4l2_subdev_state *sd_state);
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_wpf.c b/drivers/media/platform/renesas/vsp1/vsp1_wpf.c
index d0074ca00920..cab4445eca69 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_wpf.c
@@ -186,17 +186,13 @@ static int wpf_init_controls(struct vsp1_rwpf *wpf)
}
/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Core Operations
+ * VSP1 Entity Operations
*/
-static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
+void vsp1_wpf_stop(struct vsp1_rwpf *wpf)
{
- struct vsp1_rwpf *wpf = to_rwpf(subdev);
struct vsp1_device *vsp1 = wpf->entity.vsp1;
- if (enable)
- return 0;
-
/*
* Write to registers directly when stopping the stream as there will be
* no pipeline run to apply the display list.
@@ -204,27 +200,8 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
vsp1_write(vsp1, VI6_WPF_IRQ_ENB(wpf->entity.index), 0);
vsp1_write(vsp1, wpf->entity.index * VI6_WPF_OFFSET +
VI6_WPF_SRCRPF, 0);
-
- return 0;
}
-/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Operations
- */
-
-static const struct v4l2_subdev_video_ops wpf_video_ops = {
- .s_stream = wpf_s_stream,
-};
-
-static const struct v4l2_subdev_ops wpf_ops = {
- .video = &wpf_video_ops,
- .pad = &vsp1_rwpf_pad_ops,
-};
-
-/* -----------------------------------------------------------------------------
- * VSP1 Entity Operations
- */
-
static void vsp1_wpf_destroy(struct vsp1_entity *entity)
{
struct vsp1_rwpf *wpf = entity_to_rwpf(entity);
@@ -583,7 +560,7 @@ struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index)
wpf->entity.index = index;
sprintf(name, "wpf.%u", index);
- ret = vsp1_entity_init(vsp1, &wpf->entity, name, 2, &wpf_ops,
+ ret = vsp1_entity_init(vsp1, &wpf->entity, name, 2, &vsp1_rwpf_subdev_ops,
MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER);
if (ret < 0)
return ERR_PTR(ret);
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 9c8fc87938a7..9d090fa07516 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -2011,7 +2011,7 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long time
mei_hdr = mei_msg_hdr_init(cb);
if (IS_ERR(mei_hdr)) {
- rets = -PTR_ERR(mei_hdr);
+ rets = PTR_ERR(mei_hdr);
mei_hdr = NULL;
goto err;
}
@@ -2032,7 +2032,7 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long time
hbuf_slots = mei_hbuf_empty_slots(dev);
if (hbuf_slots < 0) {
- rets = -EOVERFLOW;
+ buf_len = -EOVERFLOW;
goto out;
}
diff --git a/drivers/misc/mei/pxp/mei_pxp.c b/drivers/misc/mei/pxp/mei_pxp.c
index f77d78fa5054..787c6a27a4be 100644
--- a/drivers/misc/mei/pxp/mei_pxp.c
+++ b/drivers/misc/mei/pxp/mei_pxp.c
@@ -84,9 +84,10 @@ mei_pxp_send_message(struct device *dev, const void *message, size_t size, unsig
byte = ret;
break;
}
+ return byte;
}
- return byte;
+ return 0;
}
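/*
 * The mei_pxp change above fixes the return contract of a retry loop:
 * a failed or short send is now returned from inside the loop, and only
 * complete success falls through to "return 0". Generic sketch of that
 * contract (do_send() is hypothetical; this is not the mei API):
 */
#include <stdio.h>

static int do_send(const void *buf, unsigned int len, int attempt)
{
	(void)buf;
	return attempt == 0 ? -1 : (int)len;	/* fail once, then succeed */
}

static int send_message(const void *msg, unsigned int size)
{
	for (int attempt = 0; attempt < 3; attempt++) {
		int sent = do_send(msg, size, attempt);

		if (sent < 0)
			continue;		/* retryable failure */
		if ((unsigned int)sent != size)
			return -1;		/* short write: hard error */
		return 0;			/* complete success */
	}
	return -1;				/* retries exhausted */
}

int main(void)
{
	return send_message("hi", 2);
}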
/**
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 152dfe593c43..f9a5cffa64b1 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1482,6 +1482,8 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
blk_mq_requeue_request(req, true);
else
__blk_mq_end_request(req, BLK_STS_OK);
+ } else if (mq->in_recovery) {
+ blk_mq_requeue_request(req, true);
} else {
blk_mq_end_request(req, BLK_STS_OK);
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 3d3e0ca52614..a8c17b4cd737 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -551,7 +551,9 @@ int mmc_cqe_recovery(struct mmc_host *host)
cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
- mmc_wait_for_cmd(host, &cmd, 0);
+ mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+
+ mmc_poll_for_busy(host->card, MMC_CQE_RECOVERY_TIMEOUT, true, MMC_BUSY_IO);
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = MMC_CMDQ_TASK_MGMT;
@@ -559,10 +561,13 @@ int mmc_cqe_recovery(struct mmc_host *host)
cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
- err = mmc_wait_for_cmd(host, &cmd, 0);
+ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
host->cqe_ops->cqe_recovery_finish(host);
+ if (err)
+ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+
mmc_retune_release(host);
return err;
diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c
index b3d7d6d8d654..41e94cd14109 100644
--- a/drivers/mmc/host/cqhci-core.c
+++ b/drivers/mmc/host/cqhci-core.c
@@ -942,8 +942,8 @@ static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
ret = cqhci_tasks_cleared(cq_host);
if (!ret)
- pr_debug("%s: cqhci: Failed to clear tasks\n",
- mmc_hostname(mmc));
+ pr_warn("%s: cqhci: Failed to clear tasks\n",
+ mmc_hostname(mmc));
return ret;
}
@@ -976,7 +976,7 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
ret = cqhci_halted(cq_host);
if (!ret)
- pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
+ pr_warn("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
return ret;
}
@@ -984,10 +984,10 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
/*
* After halting we expect to be able to use the command line. We interpret the
* failure to halt to mean the data lines might still be in use (and the upper
- * layers will need to send a STOP command), so we set the timeout based on a
- * generous command timeout.
+ * layers will need to send a STOP command); however, failing to halt
+ * complicates the recovery, so set a timeout that would reasonably allow
+ * I/O to complete.
*/
-#define CQHCI_START_HALT_TIMEOUT 5
+#define CQHCI_START_HALT_TIMEOUT 500
static void cqhci_recovery_start(struct mmc_host *mmc)
{
@@ -1075,28 +1075,28 @@ static void cqhci_recovery_finish(struct mmc_host *mmc)
ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
- if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
- ok = false;
-
/*
* The specification contradicts itself, by saying that tasks cannot be
* cleared if CQHCI does not halt, but if CQHCI does not halt, it should
* be disabled/re-enabled, but not to disable before clearing tasks.
* Have a go anyway.
*/
- if (!ok) {
- pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
- cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
- cqcfg &= ~CQHCI_ENABLE;
- cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
- cqcfg |= CQHCI_ENABLE;
- cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
- /* Be sure that there are no tasks */
- ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
- if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
- ok = false;
- WARN_ON(!ok);
- }
+ if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
+ ok = false;
+
+ /* Disable to make sure tasks really are cleared */
+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+ cqcfg &= ~CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+ cqcfg |= CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+ cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
+
+ if (!ok)
+ cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT);
cqhci_recover_mrqs(cq_host);
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index d8a991b349a8..77911a57b12c 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -1189,6 +1189,32 @@ static void gl9763e_hs400_enhanced_strobe(struct mmc_host *mmc,
sdhci_writel(host, val, SDHCI_GLI_9763E_HS400_ES_REG);
}
+static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot,
+ bool enable)
+{
+ struct pci_dev *pdev = slot->chip->pdev;
+ u32 value;
+
+ pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
+ value &= ~GLI_9763E_VHS_REV;
+ value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
+ pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value);
+
+ if (enable)
+ value &= ~GLI_9763E_CFG_LPSN_DIS;
+ else
+ value |= GLI_9763E_CFG_LPSN_DIS;
+
+ pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
+ value &= ~GLI_9763E_VHS_REV;
+ value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
+ pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
+}
+
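GLI parts gate their vendor configuration space behind the VHS revision field, so every change follows an unlock/modify/relock sequence, as in the function above. A generic sketch of that pattern (the helper name and its reg/bit parameters are hypothetical):

static void gli_rmw_cfg_sketch(struct pci_dev *pdev, u32 reg, u32 bit, bool set)
{
	u32 value;

	/* unlock: switch the vendor space to write mode */
	pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
	value &= ~GLI_9763E_VHS_REV;
	value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
	pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);

	/* the actual read-modify-write */
	pci_read_config_dword(pdev, reg, &value);
	value = set ? (value | bit) : (value & ~bit);
	pci_write_config_dword(pdev, reg, value);

	/* relock: back to read mode */
	pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
	value &= ~GLI_9763E_VHS_REV;
	value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
	pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
}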
static void sdhci_set_gl9763e_signaling(struct sdhci_host *host,
unsigned int timing)
{
@@ -1297,6 +1323,9 @@ static int gl9763e_add_host(struct sdhci_pci_slot *slot)
if (ret)
goto cleanup;
+ /* Disable LPM negotiation to avoid entering L1 state. */
+ gl9763e_set_low_power_negotiation(slot, false);
+
return 0;
cleanup:
@@ -1340,31 +1369,6 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
}
#ifdef CONFIG_PM
-static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot, bool enable)
-{
- struct pci_dev *pdev = slot->chip->pdev;
- u32 value;
-
- pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
- value &= ~GLI_9763E_VHS_REV;
- value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
- pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
-
- pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value);
-
- if (enable)
- value &= ~GLI_9763E_CFG_LPSN_DIS;
- else
- value |= GLI_9763E_CFG_LPSN_DIS;
-
- pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value);
-
- pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
- value &= ~GLI_9763E_VHS_REV;
- value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
- pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
-}
-
static int gl9763e_runtime_suspend(struct sdhci_pci_chip *chip)
{
struct sdhci_pci_slot *slot = chip->slots[0];
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index 6b84ba27e6ab..6b8a57e2d20f 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -416,12 +416,33 @@ static void sdhci_sprd_request_done(struct sdhci_host *host,
mmc_request_done(host->mmc, mrq);
}
+static void sdhci_sprd_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ switch (mode) {
+ case MMC_POWER_OFF:
+ mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, 0);
+
+ mmc_regulator_disable_vqmmc(mmc);
+ break;
+ case MMC_POWER_ON:
+ mmc_regulator_enable_vqmmc(mmc);
+ break;
+ case MMC_POWER_UP:
+ mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, vdd);
+ break;
+ }
+}
+
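The new .set_power hook maps SDHCI power states onto the two external rails: vmmc (card power) is raised at MMC_POWER_UP and dropped at MMC_POWER_OFF, while vqmmc (the I/O signalling rail) is enabled only at MMC_POWER_ON, i.e. after card power is stable. Stripped of the host specifics, the sequencing is:

	switch (mode) {
	case MMC_POWER_UP:			/* card power first */
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		break;
	case MMC_POWER_ON:			/* then the I/O rail */
		mmc_regulator_enable_vqmmc(mmc);
		break;
	case MMC_POWER_OFF:			/* tear down both */
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		mmc_regulator_disable_vqmmc(mmc);
		break;
	}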
static struct sdhci_ops sdhci_sprd_ops = {
.read_l = sdhci_sprd_readl,
.write_l = sdhci_sprd_writel,
.write_w = sdhci_sprd_writew,
.write_b = sdhci_sprd_writeb,
.set_clock = sdhci_sprd_set_clock,
+ .set_power = sdhci_sprd_set_power,
.get_max_clock = sdhci_sprd_get_max_clock,
.get_min_clock = sdhci_sprd_get_min_clock,
.set_bus_width = sdhci_set_bus_width,
@@ -823,6 +844,10 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
SDHCI_SUPPORT_DDR50);
+ ret = mmc_regulator_get_supply(host->mmc);
+ if (ret)
+ goto pm_runtime_disable;
+
ret = sdhci_setup_host(host);
if (ret)
goto pm_runtime_disable;
diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h
index 19e996a829c9..b54275389f8a 100644
--- a/drivers/net/arcnet/arcdevice.h
+++ b/drivers/net/arcnet/arcdevice.h
@@ -186,6 +186,8 @@ do { \
#define ARC_IS_5MBIT 1 /* card default speed is 5MBit */
#define ARC_CAN_10MBIT 2 /* card uses COM20022, supporting 10MBit,
but default is 2.5MBit. */
+#define ARC_HAS_LED 4 /* card has software controlled LEDs */
+#define ARC_HAS_ROTARY 8 /* card has rotary encoder */
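These are capability bits OR-ed into a card entry's .flags and tested at probe time; a hypothetical example:

	u32 flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT;

	if (flags & ARC_HAS_LED) {
		/* register the tx/recon LED class devices */
	}
	if (flags & ARC_HAS_ROTARY) {
		/* read the dev_id from the rotary encoder */
	}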
/* information needed to define an encapsulation driver */
struct ArcProto {
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index c580acb8b1d3..7b5c8bb02f11 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -213,12 +213,13 @@ static int com20020pci_probe(struct pci_dev *pdev,
if (!strncmp(ci->name, "EAE PLX-PCI FB2", 15))
lp->backplane = 1;
- /* Get the dev_id from the PLX rotary coder */
- if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15))
- dev_id_mask = 0x3;
- dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask;
-
- snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i);
+ if (ci->flags & ARC_HAS_ROTARY) {
+ /* Get the dev_id from the PLX rotary coder */
+ if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15))
+ dev_id_mask = 0x3;
+ dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask;
+ snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i);
+ }
if (arcnet_inb(ioaddr, COM20020_REG_R_STATUS) == 0xFF) {
pr_err("IO address %Xh is empty!\n", ioaddr);
@@ -230,6 +231,10 @@ static int com20020pci_probe(struct pci_dev *pdev,
goto err_free_arcdev;
}
+ ret = com20020_found(dev, IRQF_SHARED);
+ if (ret)
+ goto err_free_arcdev;
+
card = devm_kzalloc(&pdev->dev, sizeof(struct com20020_dev),
GFP_KERNEL);
if (!card) {
@@ -239,41 +244,39 @@ static int com20020pci_probe(struct pci_dev *pdev,
card->index = i;
card->pci_priv = priv;
- card->tx_led.brightness_set = led_tx_set;
- card->tx_led.default_trigger = devm_kasprintf(&pdev->dev,
- GFP_KERNEL, "arc%d-%d-tx",
- dev->dev_id, i);
- card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "pci:green:tx:%d-%d",
- dev->dev_id, i);
-
- card->tx_led.dev = &dev->dev;
- card->recon_led.brightness_set = led_recon_set;
- card->recon_led.default_trigger = devm_kasprintf(&pdev->dev,
- GFP_KERNEL, "arc%d-%d-recon",
- dev->dev_id, i);
- card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "pci:red:recon:%d-%d",
- dev->dev_id, i);
- card->recon_led.dev = &dev->dev;
- card->dev = dev;
-
- ret = devm_led_classdev_register(&pdev->dev, &card->tx_led);
- if (ret)
- goto err_free_arcdev;
- ret = devm_led_classdev_register(&pdev->dev, &card->recon_led);
- if (ret)
- goto err_free_arcdev;
-
- dev_set_drvdata(&dev->dev, card);
-
- ret = com20020_found(dev, IRQF_SHARED);
- if (ret)
- goto err_free_arcdev;
-
- devm_arcnet_led_init(dev, dev->dev_id, i);
+ if (ci->flags & ARC_HAS_LED) {
+ card->tx_led.brightness_set = led_tx_set;
+ card->tx_led.default_trigger = devm_kasprintf(&pdev->dev,
+ GFP_KERNEL, "arc%d-%d-tx",
+ dev->dev_id, i);
+ card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "pci:green:tx:%d-%d",
+ dev->dev_id, i);
+
+ card->tx_led.dev = &dev->dev;
+ card->recon_led.brightness_set = led_recon_set;
+ card->recon_led.default_trigger = devm_kasprintf(&pdev->dev,
+ GFP_KERNEL, "arc%d-%d-recon",
+ dev->dev_id, i);
+ card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "pci:red:recon:%d-%d",
+ dev->dev_id, i);
+ card->recon_led.dev = &dev->dev;
+
+ ret = devm_led_classdev_register(&pdev->dev, &card->tx_led);
+ if (ret)
+ goto err_free_arcdev;
+
+ ret = devm_led_classdev_register(&pdev->dev, &card->recon_led);
+ if (ret)
+ goto err_free_arcdev;
+
+ dev_set_drvdata(&dev->dev, card);
+ devm_arcnet_led_init(dev, dev->dev_id, i);
+ }
+ card->dev = dev;
list_add(&card->list, &priv->list_dev);
continue;
@@ -329,7 +332,7 @@ static struct com20020_pci_card_info card_info_5mbit = {
};
static struct com20020_pci_card_info card_info_sohard = {
- .name = "PLX-PCI",
+ .name = "SOHARD SH ARC-PCI",
.devcount = 1,
/* SOHARD needs PCI base addr 4 */
.chan_map_tbl = {
@@ -364,7 +367,7 @@ static struct com20020_pci_card_info card_info_eae_arc1 = {
},
},
.rotary = 0x0,
- .flags = ARC_CAN_10MBIT,
+ .flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT,
};
static struct com20020_pci_card_info card_info_eae_ma1 = {
@@ -396,7 +399,7 @@ static struct com20020_pci_card_info card_info_eae_ma1 = {
},
},
.rotary = 0x0,
- .flags = ARC_CAN_10MBIT,
+ .flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT,
};
static struct com20020_pci_card_info card_info_eae_fb2 = {
@@ -421,7 +424,7 @@ static struct com20020_pci_card_info card_info_eae_fb2 = {
},
},
.rotary = 0x0,
- .flags = ARC_CAN_10MBIT,
+ .flags = ARC_HAS_ROTARY | ARC_HAS_LED | ARC_CAN_10MBIT,
};
static const struct pci_device_id com20020pci_id_table[] = {
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 3fed406fb46a..ff4b39601c93 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -2713,10 +2713,18 @@ static int ksz_connect_tag_protocol(struct dsa_switch *ds,
{
struct ksz_tagger_data *tagger_data;
- tagger_data = ksz_tagger_data(ds);
- tagger_data->xmit_work_fn = ksz_port_deferred_xmit;
-
- return 0;
+ switch (proto) {
+ case DSA_TAG_PROTO_KSZ8795:
+ return 0;
+ case DSA_TAG_PROTO_KSZ9893:
+ case DSA_TAG_PROTO_KSZ9477:
+ case DSA_TAG_PROTO_LAN937X:
+ tagger_data = ksz_tagger_data(ds);
+ tagger_data->xmit_work_fn = ksz_port_deferred_xmit;
+ return 0;
+ default:
+ return -EPROTONOSUPPORT;
+ }
}
static int ksz_port_vlan_filtering(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 42b1acaca33a..07a22c74fe81 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -577,6 +577,18 @@ static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
}
+static void mv88e6351_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ unsigned long *supported = config->supported_interfaces;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+}
+
static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip)
{
u16 reg, val;
@@ -3880,7 +3892,8 @@ static int mv88e6xxx_port_setup(struct dsa_switch *ds, int port)
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- if (chip->info->ops->pcs_ops->pcs_init) {
+ if (chip->info->ops->pcs_ops &&
+ chip->info->ops->pcs_ops->pcs_init) {
err = chip->info->ops->pcs_ops->pcs_init(chip, port);
if (err)
return err;
@@ -3895,7 +3908,8 @@ static void mv88e6xxx_port_teardown(struct dsa_switch *ds, int port)
mv88e6xxx_teardown_devlink_regions_port(ds, port);
- if (chip->info->ops->pcs_ops->pcs_teardown)
+ if (chip->info->ops->pcs_ops &&
+ chip->info->ops->pcs_ops->pcs_teardown)
chip->info->ops->pcs_ops->pcs_teardown(chip, port);
}
@@ -4340,7 +4354,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
- .phylink_get_caps = mv88e6185_phylink_get_caps,
+ .phylink_get_caps = mv88e6351_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6172_ops = {
@@ -4440,7 +4454,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
- .phylink_get_caps = mv88e6185_phylink_get_caps,
+ .phylink_get_caps = mv88e6351_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6176_ops = {
@@ -5069,7 +5083,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
- .phylink_get_caps = mv88e6185_phylink_get_caps,
+ .phylink_get_caps = mv88e6351_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6351_ops = {
@@ -5117,7 +5131,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_get_caps = mv88e6185_phylink_get_caps,
+ .phylink_get_caps = mv88e6351_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6352_ops = {
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-639x.c b/drivers/net/dsa/mv88e6xxx/pcs-639x.c
index 9a8429f5d09c..d758a6c1b226 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-639x.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-639x.c
@@ -465,6 +465,7 @@ mv88e639x_pcs_select(struct mv88e6xxx_chip *chip, int port,
case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_XAUI:
case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_USXGMII:
return &mpcs->xg_pcs;
default:
@@ -873,7 +874,8 @@ static int mv88e6393x_xg_pcs_post_config(struct phylink_pcs *pcs,
struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
int err;
- if (interface == PHY_INTERFACE_MODE_10GBASER) {
+ if (interface == PHY_INTERFACE_MODE_10GBASER ||
+ interface == PHY_INTERFACE_MODE_USXGMII) {
err = mv88e6393x_erratum_5_2(mpcs);
if (err)
return err;
@@ -886,12 +888,37 @@ static int mv88e6393x_xg_pcs_post_config(struct phylink_pcs *pcs,
return mv88e639x_xg_pcs_enable(mpcs);
}
+static void mv88e6393x_xg_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
+ u16 status, lp_status;
+ int err;
+
+ if (state->interface != PHY_INTERFACE_MODE_USXGMII)
+ return mv88e639x_xg_pcs_get_state(pcs, state);
+
+ state->link = false;
+
+ err = mv88e639x_read(mpcs, MV88E6390_USXGMII_PHY_STATUS, &status);
+ err = err ? : mv88e639x_read(mpcs, MV88E6390_USXGMII_LP_STATUS, &lp_status);
+ if (err) {
+ dev_err(mpcs->mdio.dev.parent,
+ "can't read USXGMII status: %pe\n", ERR_PTR(err));
+ return;
+ }
+
+ state->link = !!(status & MDIO_USXGMII_LINK);
+ state->an_complete = state->link;
+ phylink_decode_usxgmii_word(state, lp_status);
+}
+
static const struct phylink_pcs_ops mv88e6393x_xg_pcs_ops = {
.pcs_enable = mv88e6393x_xg_pcs_enable,
.pcs_disable = mv88e6393x_xg_pcs_disable,
.pcs_pre_config = mv88e6393x_xg_pcs_pre_config,
.pcs_post_config = mv88e6393x_xg_pcs_post_config,
- .pcs_get_state = mv88e639x_xg_pcs_get_state,
+ .pcs_get_state = mv88e6393x_xg_pcs_get_state,
.pcs_config = mv88e639x_xg_pcs_config,
};
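The two chained reads above use the GNU `x ? : y` extension: `err = err ? : expr` keeps the first non-zero error and evaluates the second read only when the first succeeded. A minimal sketch with a hypothetical read_reg() helper:

	int err;
	u16 a, b;

	err = read_reg(dev, REG_A, &a);		/* hypothetical helper */
	err = err ? : read_reg(dev, REG_B, &b);	/* skipped if err != 0 */
	if (err)
		return err;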
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
index 80b44043e6c5..28c9b6f1a54f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
@@ -553,17 +553,17 @@ void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp)
/* aq_ptp_rx_hwtstamp - utility function which checks for RX time stamp
* @adapter: pointer to adapter struct
- * @skb: particular skb to send timestamp with
+ * @shhwtstamps: particular skb_shared_hwtstamps to save the timestamp in
*
* if the timestamp is valid, we convert it into the timecounter ns
* value, then store that result into the hwtstamps structure which
* is passed up the network stack
*/
-static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct sk_buff *skb,
+static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct skb_shared_hwtstamps *shhwtstamps,
u64 timestamp)
{
timestamp -= atomic_read(&aq_ptp->offset_ingress);
- aq_ptp_convert_to_hwtstamp(aq_ptp, skb_hwtstamps(skb), timestamp);
+ aq_ptp_convert_to_hwtstamp(aq_ptp, shhwtstamps, timestamp);
}
void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
@@ -639,7 +639,7 @@ bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
&aq_ptp->ptp_rx == ring || &aq_ptp->hwts_rx == ring;
}
-u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
+u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct skb_shared_hwtstamps *shhwtstamps, u8 *p,
unsigned int len)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
@@ -648,7 +648,7 @@ u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
p, len, &timestamp);
if (ret > 0)
- aq_ptp_rx_hwtstamp(aq_ptp, skb, timestamp);
+ aq_ptp_rx_hwtstamp(aq_ptp, shhwtstamps, timestamp);
return ret;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
index 28ccb7ca2df9..210b723f2207 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
@@ -67,7 +67,7 @@ int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
/* Return whether the ring belongs to PTP or not */
bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring);
-u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
+u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct skb_shared_hwtstamps *shhwtstamps, u8 *p,
unsigned int len);
struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp);
@@ -143,7 +143,7 @@ static inline bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
}
static inline u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic,
- struct sk_buff *skb, u8 *p,
+ struct skb_shared_hwtstamps *shhwtstamps, u8 *p,
unsigned int len)
{
return 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 4de22eed099a..694daeaf3e61 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -647,7 +647,7 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
}
if (is_ptp_ring)
buff->len -=
- aq_ptp_extract_ts(self->aq_nic, skb,
+ aq_ptp_extract_ts(self->aq_nic, skb_hwtstamps(skb),
aq_buf_vaddr(&buff->rxdata),
buff->len);
@@ -742,6 +742,8 @@ static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
struct aq_ring_buff_s *buff = &rx_ring->buff_ring[rx_ring->sw_head];
bool is_ptp_ring = aq_ptp_ring(rx_ring->aq_nic, rx_ring);
struct aq_ring_buff_s *buff_ = NULL;
+ u16 ptp_hwtstamp_len = 0;
+ struct skb_shared_hwtstamps shhwtstamps;
struct sk_buff *skb = NULL;
unsigned int next_ = 0U;
struct xdp_buff xdp;
@@ -810,11 +812,12 @@ static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
hard_start = page_address(buff->rxdata.page) +
buff->rxdata.pg_off - rx_ring->page_offset;
- if (is_ptp_ring)
- buff->len -=
- aq_ptp_extract_ts(rx_ring->aq_nic, skb,
- aq_buf_vaddr(&buff->rxdata),
- buff->len);
+ if (is_ptp_ring) {
+ ptp_hwtstamp_len = aq_ptp_extract_ts(rx_ring->aq_nic, &shhwtstamps,
+ aq_buf_vaddr(&buff->rxdata),
+ buff->len);
+ buff->len -= ptp_hwtstamp_len;
+ }
xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
xdp_prepare_buff(&xdp, hard_start, rx_ring->page_offset,
@@ -834,6 +837,9 @@ static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
if (IS_ERR(skb) || !skb)
continue;
+ if (ptp_hwtstamp_len > 0)
+ *skb_hwtstamps(skb) = shhwtstamps;
+
if (buff->is_vlan)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
buff->vlan_rx_tag);
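In the XDP path no skb exists when the descriptor is parsed — and the XDP program may drop or redirect the frame — so the timestamp is stashed in an on-stack skb_shared_hwtstamps and attached only once an skb has actually been built. The shape of the pattern, with a hypothetical helper standing in for the XDP run:

	struct skb_shared_hwtstamps stash = {};
	u16 ts_len = 0;

	if (is_ptp_ring)
		ts_len = aq_ptp_extract_ts(nic, &stash, buf, len); /* pre-XDP */

	skb = run_xdp_and_build_skb(ring);	/* hypothetical; may return NULL */
	if (!IS_ERR_OR_NULL(skb) && ts_len > 0)
		*skb_hwtstamps(skb) = stash;	/* attach once the skb exists */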
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 38d89d80b4a9..273c9ba48f09 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -2075,6 +2075,7 @@ destroy_flow_table:
rhashtable_destroy(&tc_info->flow_table);
free_tc_info:
kfree(tc_info);
+ bp->tc_info = NULL;
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 48b6191efa56..f52830dfb26a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6474,6 +6474,14 @@ static void tg3_dump_state(struct tg3 *tp)
int i;
u32 *regs;
+ /* On a PCI channel error, all registers read back as 0xffff;
+ * don't dump them, just report the error and return.
+ */
+ if (tp->pdev->error_state != pci_channel_io_normal) {
+ netdev_err(tp->dev, "PCI channel ERROR!\n");
+ return;
+ }
+
regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
if (!regs)
return;
@@ -11259,7 +11267,8 @@ static void tg3_reset_task(struct work_struct *work)
rtnl_lock();
tg3_full_lock(tp, 0);
- if (tp->pcierr_recovery || !netif_running(tp->dev)) {
+ if (tp->pcierr_recovery || !netif_running(tp->dev) ||
+ tp->pdev->error_state != pci_channel_io_normal) {
tg3_flag_clear(tp, RESET_TASK_PENDING);
tg3_full_unlock(tp);
rtnl_unlock();
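Both hunks key off pdev->error_state, which the PCI error-recovery core maintains; any value other than pci_channel_io_normal (i.e. pci_channel_io_frozen or pci_channel_io_perm_failure) means MMIO reads return all-ones and register work is pointless:

	/* guard any MMIO-heavy path during PCI error recovery */
	if (tp->pdev->error_state != pci_channel_io_normal)
		return;		/* reads would only yield 0xffffffff */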
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 15bab41cee48..888509cf1f21 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -516,8 +516,6 @@ struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
memcpy(skb->data, fd_vaddr + fd_offset, fd_length);
- dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
-
return skb;
}
@@ -589,6 +587,7 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
struct rtnl_link_stats64 *percpu_stats;
struct dpaa2_eth_drv_stats *percpu_extras;
struct device *dev = priv->net_dev->dev.parent;
+ bool recycle_rx_buf = false;
void *buf_data;
u32 xdp_act;
@@ -618,6 +617,8 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
+ } else {
+ recycle_rx_buf = true;
}
} else if (fd_format == dpaa2_fd_sg) {
WARN_ON(priv->xdp_prog);
@@ -637,6 +638,9 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
goto err_build_skb;
dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);
+
+ if (recycle_rx_buf)
+ dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
return;
err_build_skb:
@@ -1073,14 +1077,12 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
dma_addr_t addr;
buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
-
- /* If there's enough room to align the FD address, do it.
- * It will help hardware optimize accesses.
- */
aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
DPAA2_ETH_TX_BUF_ALIGN);
if (aligned_start >= skb->head)
buffer_start = aligned_start;
+ else
+ return -ENOMEM;
/* Store a backpointer to the skb at the beginning of the buffer
* (in the private data area) such that we can release it
@@ -4967,6 +4969,8 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
if (err)
goto err_dl_port_add;
+ net_dev->needed_headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
+
err = register_netdev(net_dev);
if (err < 0) {
dev_err(dev, "register_netdev() failed\n");
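The alignment logic above wants an aligned address at or below buffer_start; since PTR_ALIGN() rounds up, it first steps back one full alignment unit, and the enlarged needed_headroom reservation (SWA size plus one alignment unit) keeps the result inside the skb head in the normal case, leaving the -ENOMEM branch as a safety net. The align-down idiom in isolation, with an illustrative alignment value:

	#define EXAMPLE_ALIGN 64	/* stand-in for DPAA2_ETH_TX_BUF_ALIGN */

	/* PTR_ALIGN rounds up, so step back one unit to effectively round
	 * down; the result lands within EXAMPLE_ALIGN bytes below p. */
	u8 *aligned = PTR_ALIGN(p - EXAMPLE_ALIGN, EXAMPLE_ALIGN);

	if (aligned < skb->head)	/* insufficient headroom */
		return -ENOMEM;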
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index bfb6c96c3b2f..834cba8c3a41 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -740,7 +740,7 @@ static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options)
static inline unsigned int dpaa2_eth_needed_headroom(struct sk_buff *skb)
{
- unsigned int headroom = DPAA2_ETH_SWA_SIZE;
+ unsigned int headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
/* If we don't have an skb (e.g. XDP buffer), we only need space for
* the software annotation area
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 928d934cb21a..f75668c47935 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -66,6 +66,27 @@ static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb)
}
}
+static u32 hns_mac_link_anti_shake(struct mac_driver *mac_ctrl_drv)
+{
+#define HNS_MAC_LINK_WAIT_TIME 5
+#define HNS_MAC_LINK_WAIT_CNT 40
+
+ u32 link_status = 0;
+ int i;
+
+ if (!mac_ctrl_drv->get_link_status)
+ return link_status;
+
+ for (i = 0; i < HNS_MAC_LINK_WAIT_CNT; i++) {
+ msleep(HNS_MAC_LINK_WAIT_TIME);
+ mac_ctrl_drv->get_link_status(mac_ctrl_drv, &link_status);
+ if (!link_status)
+ break;
+ }
+
+ return link_status;
+}
+
void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
{
struct mac_driver *mac_ctrl_drv;
@@ -83,6 +104,14 @@ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
&sfp_prsnt);
if (!ret)
*link_status = *link_status && sfp_prsnt;
+
+ /* A FIBER port may report a fake link up. When the link status
+ * changes from down to up, we need to de-bounce (anti-shake) it;
+ * the anti-shake time is based on tests. Only FIBER ports need
+ * this.
+ */
+ if (*link_status && !mac_cb->link)
+ *link_status = hns_mac_link_anti_shake(mac_ctrl_drv);
}
mac_cb->link = *link_status;
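The anti-shake helper reports link-up only if the link survives the whole 40 x 5 ms window; a single drop during the window is treated as still-down. As a generic de-bounce sketch (the name is hypothetical; values mirror the ones above):

static bool link_is_stable_sketch(struct mac_driver *drv)
{
	u32 up = 0;
	int i;

	for (i = 0; i < 40; i++) {
		msleep(5);
		drv->get_link_status(drv, &up);
		if (!up)
			return false;	/* bounced: keep reporting down */
	}
	return true;
}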
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 0900abf5c508..8a713eed4465 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -142,7 +142,8 @@ MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);
static void fill_desc(struct hnae_ring *ring, void *priv,
int size, dma_addr_t dma, int frag_end,
- int buf_num, enum hns_desc_type type, int mtu)
+ int buf_num, enum hns_desc_type type, int mtu,
+ bool is_gso)
{
struct hnae_desc *desc = &ring->desc[ring->next_to_use];
struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
@@ -275,6 +276,15 @@ static int hns_nic_maybe_stop_tso(
return 0;
}
+static int hns_nic_maybe_stop_tx_v2(struct sk_buff **out_skb, int *bnum,
+ struct hnae_ring *ring)
+{
+ if (skb_is_gso(*out_skb))
+ return hns_nic_maybe_stop_tso(out_skb, bnum, ring);
+ else
+ return hns_nic_maybe_stop_tx(out_skb, bnum, ring);
+}
+
static void fill_tso_desc(struct hnae_ring *ring, void *priv,
int size, dma_addr_t dma, int frag_end,
int buf_num, enum hns_desc_type type, int mtu)
@@ -300,6 +310,19 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
mtu);
}
+static void fill_desc_v2(struct hnae_ring *ring, void *priv,
+ int size, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu,
+ bool is_gso)
+{
+ if (is_gso)
+ fill_tso_desc(ring, priv, size, dma, frag_end, buf_num, type,
+ mtu);
+ else
+ fill_v2_desc(ring, priv, size, dma, frag_end, buf_num, type,
+ mtu);
+}
+
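Rather than swapping fill_desc/maybe_stop_tx function pointers every time NETIF_F_TSO is toggled — which can race with in-flight transmits — the *_v2 wrappers are installed once and branch per packet on skb_is_gso(). Condensed to its essence (a restatement of the wrapper above):

static int maybe_stop_tx_dispatch(struct sk_buff **skb, int *bnum,
				  struct hnae_ring *ring)
{
	/* per-packet branch instead of rewriting priv->ops at runtime */
	if (skb_is_gso(*skb))
		return hns_nic_maybe_stop_tso(skb, bnum, ring);
	return hns_nic_maybe_stop_tx(skb, bnum, ring);
}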
netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
struct sk_buff *skb,
struct hns_nic_ring_data *ring_data)
@@ -313,6 +336,7 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
int seg_num;
dma_addr_t dma;
int size, next_to_use;
+ bool is_gso;
int i;
switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
@@ -339,8 +363,9 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
ring->stats.sw_err_cnt++;
goto out_err_tx_ok;
}
+ is_gso = skb_is_gso(skb);
priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
- buf_num, DESC_TYPE_SKB, ndev->mtu);
+ buf_num, DESC_TYPE_SKB, ndev->mtu, is_gso);
/* fill the fragments */
for (i = 1; i < seg_num; i++) {
@@ -354,7 +379,7 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
}
priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
seg_num - 1 == i ? 1 : 0, buf_num,
- DESC_TYPE_PAGE, ndev->mtu);
+ DESC_TYPE_PAGE, ndev->mtu, is_gso);
}
/* complete translating all packets */
@@ -1776,15 +1801,6 @@ static int hns_nic_set_features(struct net_device *netdev,
netdev_info(netdev, "enet v1 do not support tso!\n");
break;
default:
- if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
- priv->ops.fill_desc = fill_tso_desc;
- priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
- /* The chip only support 7*4096 */
- netif_set_tso_max_size(netdev, 7 * 4096);
- } else {
- priv->ops.fill_desc = fill_v2_desc;
- priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
- }
break;
}
netdev->features = features;
@@ -2159,16 +2175,9 @@ static void hns_nic_set_priv_ops(struct net_device *netdev)
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
} else {
priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
- if ((netdev->features & NETIF_F_TSO) ||
- (netdev->features & NETIF_F_TSO6)) {
- priv->ops.fill_desc = fill_tso_desc;
- priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
- /* This chip only support 7*4096 */
- netif_set_tso_max_size(netdev, 7 * 4096);
- } else {
- priv->ops.fill_desc = fill_v2_desc;
- priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
- }
+ priv->ops.fill_desc = fill_desc_v2;
+ priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx_v2;
+ netif_set_tso_max_size(netdev, 7 * 4096);
/* enable TSO at init;
 * control TSO on/off through the TSE bit in the BD
*/
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
index ffa9d6573f54..3f3ee032f631 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -44,7 +44,8 @@ struct hns_nic_ring_data {
struct hns_nic_ops {
void (*fill_desc)(struct hnae_ring *ring, void *priv,
int size, dma_addr_t dma, int frag_end,
- int buf_num, enum hns_desc_type type, int mtu);
+ int buf_num, enum hns_desc_type type, int mtu,
+ bool is_gso);
int (*maybe_stop_tx)(struct sk_buff **out_skb,
int *bnum, struct hnae_ring *ring);
void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index f7a332e51524..1ab8dbe2d880 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -16224,7 +16224,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
if (val < MAX_FRAME_SIZE_DEFAULT)
dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
- i, val);
+ pf->hw.port, val);
/* Add a filter to drop all Flow control frames from any VSI from being
* transmitted. By doing so we stop a malicious VF from sending out
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 6f236d1a6444..19cbfe554689 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -827,18 +827,10 @@ static int __iavf_set_coalesce(struct net_device *netdev,
struct iavf_adapter *adapter = netdev_priv(netdev);
int i;
- if (ec->rx_coalesce_usecs == 0) {
- if (ec->use_adaptive_rx_coalesce)
- netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
- } else if ((ec->rx_coalesce_usecs < IAVF_MIN_ITR) ||
- (ec->rx_coalesce_usecs > IAVF_MAX_ITR)) {
+ if (ec->rx_coalesce_usecs > IAVF_MAX_ITR) {
netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
return -EINVAL;
- } else if (ec->tx_coalesce_usecs == 0) {
- if (ec->use_adaptive_tx_coalesce)
- netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
- } else if ((ec->tx_coalesce_usecs < IAVF_MIN_ITR) ||
- (ec->tx_coalesce_usecs > IAVF_MAX_ITR)) {
+ } else if (ec->tx_coalesce_usecs > IAVF_MAX_ITR) {
netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index 7e6ee32d19b6..10ba36602c0c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -15,7 +15,6 @@
*/
#define IAVF_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define IAVF_ITR_MASK 0x1FFE /* mask for ITR register value */
-#define IAVF_MIN_ITR 2 /* reg uses 2 usec resolution */
#define IAVF_ITR_100K 10 /* all values below must be even */
#define IAVF_ITR_50K 20
#define IAVF_ITR_20K 50
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index cd065ec48c87..280994ee5933 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -570,6 +570,50 @@ resume_traffic:
}
/**
+ * ice_lag_build_netdev_list - populate the lag struct's netdev list
+ * @lag: local lag struct
+ * @ndlist: pointer to netdev list to populate
+ */
+static void ice_lag_build_netdev_list(struct ice_lag *lag,
+ struct ice_lag_netdev_list *ndlist)
+{
+ struct ice_lag_netdev_list *nl;
+ struct net_device *tmp_nd;
+
+ INIT_LIST_HEAD(&ndlist->node);
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
+ nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
+ if (!nl)
+ break;
+
+ nl->netdev = tmp_nd;
+ list_add(&nl->node, &ndlist->node);
+ }
+ rcu_read_unlock();
+ lag->netdev_head = &ndlist->node;
+}
+
+/**
+ * ice_lag_destroy_netdev_list - free lag struct's netdev list
+ * @lag: pointer to local lag struct
+ * @ndlist: pointer to lag struct netdev list
+ */
+static void ice_lag_destroy_netdev_list(struct ice_lag *lag,
+ struct ice_lag_netdev_list *ndlist)
+{
+ struct ice_lag_netdev_list *entry, *n;
+
+ rcu_read_lock();
+ list_for_each_entry_safe(entry, n, &ndlist->node, node) {
+ list_del(&entry->node);
+ kfree(entry);
+ }
+ rcu_read_unlock();
+ lag->netdev_head = NULL;
+}
+
+/**
* ice_lag_move_single_vf_nodes - Move Tx scheduling nodes for single VF
* @lag: primary interface LAG struct
* @oldport: lport of previous interface
@@ -597,7 +641,6 @@ ice_lag_move_single_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport,
void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
{
struct ice_lag_netdev_list ndlist;
- struct list_head *tmp, *n;
u8 pri_port, act_port;
struct ice_lag *lag;
struct ice_vsi *vsi;
@@ -621,38 +664,15 @@ void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
pri_port = pf->hw.port_info->lport;
act_port = lag->active_port;
- if (lag->upper_netdev) {
- struct ice_lag_netdev_list *nl;
- struct net_device *tmp_nd;
-
- INIT_LIST_HEAD(&ndlist.node);
- rcu_read_lock();
- for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
- nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
- if (!nl)
- break;
-
- nl->netdev = tmp_nd;
- list_add(&nl->node, &ndlist.node);
- }
- rcu_read_unlock();
- }
-
- lag->netdev_head = &ndlist.node;
+ if (lag->upper_netdev)
+ ice_lag_build_netdev_list(lag, &ndlist);
if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) &&
lag->bonded && lag->primary && pri_port != act_port &&
!list_empty(lag->netdev_head))
ice_lag_move_single_vf_nodes(lag, pri_port, act_port, vsi->idx);
- list_for_each_safe(tmp, n, &ndlist.node) {
- struct ice_lag_netdev_list *entry;
-
- entry = list_entry(tmp, struct ice_lag_netdev_list, node);
- list_del(&entry->node);
- kfree(entry);
- }
- lag->netdev_head = NULL;
+ ice_lag_destroy_netdev_list(lag, &ndlist);
new_vf_unlock:
mutex_unlock(&pf->lag_mutex);
@@ -679,6 +699,29 @@ static void ice_lag_move_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport)
ice_lag_move_single_vf_nodes(lag, oldport, newport, i);
}
+/**
+ * ice_lag_move_vf_nodes_cfg - move vf nodes outside LAG netdev event context
+ * @lag: local lag struct
+ * @src_prt: lport value for source port
+ * @dst_prt: lport value for destination port
+ *
+ * This function is used to move nodes during an out-of-netdev-event situation,
+ * primarily when the driver needs to reconfigure or recreate resources.
+ *
+ * Must be called while holding the lag_mutex to prevent lag events from
+ * being processed while out-of-sync moves are happening. Also, paired moves,
+ * such as those used in a reset flow, should both be called under the same
+ * mutex lock to avoid changes between the start and end of the reset.
+ */
+void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt)
+{
+ struct ice_lag_netdev_list ndlist;
+
+ ice_lag_build_netdev_list(lag, &ndlist);
+ ice_lag_move_vf_nodes(lag, src_prt, dst_prt);
+ ice_lag_destroy_netdev_list(lag, &ndlist);
+}
+
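Callers bracket the actual reconfiguration with a pair of these moves under a single lag_mutex hold, as the reset and virtchnl paths later in this series do — condensed:

	mutex_lock(&pf->lag_mutex);
	if (lag && lag->bonded && lag->primary &&
	    act_prt != ICE_LAG_INVALID_PORT)
		ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);

	/* ... reset / queue reconfiguration runs here ... */

	if (lag && lag->bonded && lag->primary &&
	    act_prt != ICE_LAG_INVALID_PORT)
		ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
	mutex_unlock(&pf->lag_mutex);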
#define ICE_LAG_SRIOV_CP_RECIPE 10
#define ICE_LAG_SRIOV_TRAIN_PKT_LEN 16
@@ -2051,7 +2094,6 @@ void ice_lag_rebuild(struct ice_pf *pf)
{
struct ice_lag_netdev_list ndlist;
struct ice_lag *lag, *prim_lag;
- struct list_head *tmp, *n;
u8 act_port, loc_port;
if (!pf->lag || !pf->lag->bonded)
@@ -2063,21 +2105,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
if (lag->primary) {
prim_lag = lag;
} else {
- struct ice_lag_netdev_list *nl;
- struct net_device *tmp_nd;
-
- INIT_LIST_HEAD(&ndlist.node);
- rcu_read_lock();
- for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
- nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
- if (!nl)
- break;
-
- nl->netdev = tmp_nd;
- list_add(&nl->node, &ndlist.node);
- }
- rcu_read_unlock();
- lag->netdev_head = &ndlist.node;
+ ice_lag_build_netdev_list(lag, &ndlist);
prim_lag = ice_lag_find_primary(lag);
}
@@ -2107,13 +2135,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
ice_clear_rdma_cap(pf);
lag_rebuild_out:
- list_for_each_safe(tmp, n, &ndlist.node) {
- struct ice_lag_netdev_list *entry;
-
- entry = list_entry(tmp, struct ice_lag_netdev_list, node);
- list_del(&entry->node);
- kfree(entry);
- }
+ ice_lag_destroy_netdev_list(lag, &ndlist);
mutex_unlock(&pf->lag_mutex);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index 9557e8605a07..ede833dfa658 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -65,4 +65,5 @@ int ice_init_lag(struct ice_pf *pf);
void ice_deinit_lag(struct ice_pf *pf);
void ice_lag_rebuild(struct ice_pf *pf);
bool ice_lag_is_switchdev_running(struct ice_pf *pf);
+void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt);
#endif /* _ICE_LAG_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 2a5e6616cc0a..e1494f24f661 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -374,16 +374,11 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
*/
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
- struct ice_pf *pf;
-
if (!vf || !q_vector)
return -EINVAL;
- pf = vf->pf;
-
/* always add one to account for the OICR being the first MSIX */
- return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id +
- q_vector->v_idx + 1;
+ return vf->first_vector_idx + q_vector->v_idx + 1;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index aca1f2ea5034..b7ae09952156 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -829,12 +829,16 @@ static void ice_notify_vf_reset(struct ice_vf *vf)
int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
struct ice_pf *pf = vf->pf;
+ struct ice_lag *lag;
struct ice_vsi *vsi;
+ u8 act_prt, pri_prt;
struct device *dev;
int err = 0;
bool rsd;
dev = ice_pf_to_dev(pf);
+ act_prt = ICE_LAG_INVALID_PORT;
+ pri_prt = pf->hw.port_info->lport;
if (flags & ICE_VF_RESET_NOTIFY)
ice_notify_vf_reset(vf);
@@ -845,6 +849,17 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
return 0;
}
+ lag = pf->lag;
+ mutex_lock(&pf->lag_mutex);
+ if (lag && lag->bonded && lag->primary) {
+ act_prt = lag->active_port;
+ if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
+ lag->upper_netdev)
+ ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
+ else
+ act_prt = ICE_LAG_INVALID_PORT;
+ }
+
if (flags & ICE_VF_RESET_LOCK)
mutex_lock(&vf->cfg_lock);
else
@@ -937,6 +952,11 @@ out_unlock:
if (flags & ICE_VF_RESET_LOCK)
mutex_unlock(&vf->cfg_lock);
+ if (lag && lag->bonded && lag->primary &&
+ act_prt != ICE_LAG_INVALID_PORT)
+ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
return err;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
index d7b10dc67f03..80dc4bcdd3a4 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
@@ -32,7 +32,6 @@ static void ice_port_vlan_on(struct ice_vsi *vsi)
/* setup outer VLAN ops */
vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
- vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
/* setup inner VLAN ops */
vlan_ops = &vsi->inner_vlan_ops;
@@ -47,8 +46,13 @@ static void ice_port_vlan_on(struct ice_vsi *vsi)
vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan;
- vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan;
}
+
+ /* All Rx traffic should be in the domain of the assigned port VLAN,
+ * so prevent disabling Rx VLAN filtering
+ */
+ vlan_ops->dis_rx_filtering = noop_vlan;
+
vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering;
}
@@ -77,6 +81,8 @@ static void ice_port_vlan_off(struct ice_vsi *vsi)
vlan_ops->del_vlan = ice_vsi_del_vlan;
}
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+
if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
vlan_ops->ena_rx_filtering = noop_vlan;
else
@@ -141,7 +147,6 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
&vsi->outer_vlan_ops : &vsi->inner_vlan_ops;
vlan_ops->add_vlan = ice_vsi_add_vlan;
- vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index cdf17b1e2f25..1c7b4ded948b 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -1523,7 +1523,6 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
u16 num_q_vectors_mapped, vsi_id, vector_id;
struct virtchnl_irq_map_info *irqmap_info;
struct virtchnl_vector_map *map;
- struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
int i;
@@ -1535,7 +1534,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
* there is actually at least a single VF queue vector mapped
*/
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- pf->vfs.num_msix_per < num_q_vectors_mapped ||
+ vf->num_msix < num_q_vectors_mapped ||
!num_q_vectors_mapped) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -1557,7 +1556,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
/* vector_id is always 0-based for each VF, and can never be
* larger than or equal to the max allowed interrupts per VF
*/
- if (!(vector_id < pf->vfs.num_msix_per) ||
+ if (!(vector_id < vf->num_msix) ||
!ice_vc_isvalid_vsi_id(vf, vsi_id) ||
(!vector_id && (map->rxq_map || map->txq_map))) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -1603,9 +1602,24 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
(struct virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_queue_pair_info *qpi;
struct ice_pf *pf = vf->pf;
+ struct ice_lag *lag;
struct ice_vsi *vsi;
+ u8 act_prt, pri_prt;
int i = -1, q_idx;
+ lag = pf->lag;
+ mutex_lock(&pf->lag_mutex);
+ act_prt = ICE_LAG_INVALID_PORT;
+ pri_prt = pf->hw.port_info->lport;
+ if (lag && lag->bonded && lag->primary) {
+ act_prt = lag->active_port;
+ if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
+ lag->upper_netdev)
+ ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
+ else
+ act_prt = ICE_LAG_INVALID_PORT;
+ }
+
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
goto error_param;
@@ -1729,6 +1743,11 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
}
+ if (lag && lag->bonded && lag->primary &&
+ act_prt != ICE_LAG_INVALID_PORT)
+ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
/* send the response to the VF */
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
VIRTCHNL_STATUS_SUCCESS, NULL, 0);
@@ -1743,6 +1762,11 @@ error_param:
vf->vf_id, i);
}
+ if (lag && lag->bonded && lag->primary &&
+ act_prt != ICE_LAG_INVALID_PORT)
+ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
ice_lag_move_new_vf_nodes(vf);
/* send the response to the VF */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 6845556581c3..5df42634ceb8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -1945,7 +1945,7 @@ struct mcs_hw_info {
u8 tcam_entries; /* RX/TX Tcam entries per mcs block */
u8 secy_entries; /* RX/TX SECY entries per mcs block */
u8 sc_entries; /* RX/TX SC CAM entries per mcs block */
- u8 sa_entries; /* PN table entries = SA entries */
+ u16 sa_entries; /* PN table entries = SA entries */
u64 rsvd[16];
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
index c43f19dfbd74..c1775bd01c2b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
@@ -117,7 +117,7 @@ void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id
reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(id);
stats->pkt_tagged_ctl_cnt = mcs_reg_read(mcs, reg);
- reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(id);
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(id);
stats->pkt_untaged_cnt = mcs_reg_read(mcs, reg);
reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(id);
@@ -215,7 +215,7 @@ void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats,
reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(id);
stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
- reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(id);
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDX(id);
stats->pkt_unchecked_cnt = mcs_reg_read(mcs, reg);
if (mcs->hw->mcs_blks > 1) {
@@ -1219,6 +1219,17 @@ struct mcs *mcs_get_pdata(int mcs_id)
return NULL;
}
+bool is_mcs_bypass(int mcs_id)
+{
+ struct mcs *mcs_dev;
+
+ list_for_each_entry(mcs_dev, &mcs_list, mcs_list) {
+ if (mcs_dev->mcs_id == mcs_id)
+ return mcs_dev->bypass;
+ }
+ return true;
+}
+
void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req)
{
u64 val = 0;
@@ -1436,7 +1447,7 @@ static int mcs_x2p_calibration(struct mcs *mcs)
return err;
}
-static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass)
+static void mcs_set_external_bypass(struct mcs *mcs, bool bypass)
{
u64 val;
@@ -1447,6 +1458,7 @@ static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass)
else
val &= ~BIT_ULL(6);
mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+ mcs->bypass = bypass;
}
static void mcs_global_cfg(struct mcs *mcs)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
index 0f89dcb76465..f927cc61dfd2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
@@ -149,6 +149,7 @@ struct mcs {
u16 num_vec;
void *rvu;
u16 *tx_sa_active;
+ bool bypass;
};
struct mcs_ops {
@@ -206,6 +207,7 @@ void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *
int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc);
int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req);
int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req);
+bool is_mcs_bypass(int mcs_id);
/* CN10K-B APIs */
void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
index f3ab01fc363c..f4c6de89002c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
@@ -810,14 +810,37 @@
offset = 0x9d8ull; \
offset; })
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xee80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xe818ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xa680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xd018ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xf680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xe018ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(a) (0xe680ull + (a) * 0x8ull)
#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(a) (0xde80ull + (a) * 0x8ull)
-#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(a) (0xa680ull + (a) * 0x8ull)
#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(a) (0xd218 + (a) * 0x8ull)
-#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(a) (0xd018ull + (a) * 0x8ull)
-#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(a) (0xee80ull + (a) * 0x8ull)
#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(a) (0xb680ull + (a) * 0x8ull)
-#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(a) (0xf680ull + (a) * 0x8ull)
#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(a) (0x12680ull + (a) * 0x8ull)
#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(a) (0x15680ull + (a) * 0x8ull)
#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(a) (0x13680ull + (a) * 0x8ull)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 22c395c7d040..731bb82b577c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -2631,6 +2631,9 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
rvu_npc_free_mcam_entries(rvu, pcifunc, -1);
rvu_mac_reset(rvu, pcifunc);
+ if (rvu->mcs_blk_cnt)
+ rvu_mcs_flr_handler(rvu, pcifunc);
+
mutex_unlock(&rvu->flr_lock);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index c4d999ef5ab4..cce2806aaa50 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -345,6 +345,7 @@ struct nix_hw {
struct nix_txvlan txvlan;
struct nix_ipolicer *ipolicer;
u64 *tx_credits;
+ u8 cc_mcs_cnt;
};
/* RVU block's capabilities or functionality,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index c70932625d0d..62780d8b4f9a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -1087,7 +1087,7 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
if (!rvu_dl->devlink_wq)
- goto err;
+ return -ENOMEM;
INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work);
INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work);
@@ -1095,9 +1095,6 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work);
return 0;
-err:
- rvu_npa_health_reporters_destroy(rvu_dl);
- return -ENOMEM;
}
static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 23c2f2ed2fb8..4227ebb4a758 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -12,6 +12,7 @@
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
+#include "mcs.h"
#include "cgx.h"
#include "lmac_common.h"
#include "rvu_npc_hash.h"
@@ -4389,6 +4390,12 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
}
+ /* Get MCS external bypass status for CN10K-B */
+ if (mcs_get_blkcnt() == 1) {
+ /* Adjust for 2 credits when external bypass is disabled */
+ nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2;
+ }
+
/* Set credits for Tx links assuming max packet length allowed.
* This will be reconfigured based on MTU set for PF/VF.
*/
@@ -4412,6 +4419,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
tx_credits = (lmac_fifo_len - lmac_max_frs) / 16;
/* Enable credits and set credit pkt count to max allowed */
cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_MCS_CNT_MASK, nix_hw->cc_mcs_cnt);
link = iter + slink;
nix_hw->tx_credits[link] = tx_credits;
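NIX_AF_LINKX_MCS_CNT_MASK (added to rvu_reg.h further below) names bits 33:32 of the link credit register, and FIELD_PREP() shifts a value into a GENMASK-defined field. In isolation, with a stand-in mask:

	#include <linux/bitfield.h>

	#define EXAMPLE_MCS_CNT_MASK GENMASK_ULL(33, 32)

	u64 cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
	cfg |= FIELD_PREP(EXAMPLE_MCS_CNT_MASK, 2);	/* 2 -> bits 33:32 */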
@@ -5505,6 +5513,8 @@ int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
ipolicer = &nix_hw->ipolicer[layer];
for (idx = 0; idx < req->prof_count[layer]; idx++) {
+ if (idx == MAX_BANDPROF_PER_PFFUNC)
+ break;
prof_idx = req->prof_idx[layer][idx];
if (prof_idx >= ipolicer->band_prof.max ||
ipolicer->pfvf_map[prof_idx] != pcifunc)
@@ -5518,8 +5528,6 @@ int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
ipolicer->pfvf_map[prof_idx] = 0x00;
ipolicer->match_id[prof_idx] = 0;
rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
- if (idx == MAX_BANDPROF_PER_PFFUNC)
- break;
}
}
mutex_unlock(&rvu->rsrc_lock);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 16cfc802e348..f65805860c8d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -389,7 +389,13 @@ static u64 npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam,
int bank, nixlf, index;
/* get ucast entry rule entry index */
- nix_get_nixlf(rvu, pf_func, &nixlf, NULL);
+ if (nix_get_nixlf(rvu, pf_func, &nixlf, NULL)) {
+ dev_err(rvu->dev, "%s: nixlf not attached to pcifunc:0x%x\n",
+ __func__, pf_func);
+ /* Action 0 is drop */
+ return 0;
+ }
+
index = npc_get_nixlf_mcam_index(mcam, pf_func, nixlf,
NIXLF_UCAST_ENTRY);
bank = npc_get_bank(mcam, index);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
index b3150f053291..d46ac29adb96 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
@@ -31,8 +31,8 @@ static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = {
{NIX_TXSCH_LVL_TL4, 3, 0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18},
{0x1200, 0x12E0} } },
{NIX_TXSCH_LVL_TL3, 4, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608},
- {0x1610, 0x1618}, {0x1700, 0x17B0} } },
- {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17B0} } },
+ {0x1610, 0x1618}, {0x1700, 0x17C8} } },
+ {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17C8} } },
{NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } },
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index b42e631e52d0..18c1c9f361cc 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -437,6 +437,7 @@
#define NIX_AF_LINKX_BASE_MASK GENMASK_ULL(11, 0)
#define NIX_AF_LINKX_RANGE_MASK GENMASK_ULL(19, 16)
+#define NIX_AF_LINKX_MCS_CNT_MASK GENMASK_ULL(33, 32)
/* SSO */
#define SSO_AF_CONST (0x1000)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index a4a258da8dd5..c1c99d7054f8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -450,6 +450,9 @@ int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
aq->prof.pebs_mantissa = 0;
aq->prof_mask.pebs_mantissa = 0xFF;
+ aq->prof.hl_en = 0;
+ aq->prof_mask.hl_en = 1;
+
/* Fill AQ info */
aq->qidx = profile;
aq->ctype = NIX_AQ_CTYPE_BANDPROF;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index e7c69b57147e..06910307085e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -1070,6 +1070,8 @@ int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data);
+void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic);
+
/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 9efcec549834..53f6258a973c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -334,9 +334,12 @@ static void otx2_get_pauseparam(struct net_device *netdev,
if (is_otx2_lbkvf(pfvf->pdev))
return;
+ mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
- if (!req)
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
return;
+ }
if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
rsp = (struct cgx_pause_frm_cfg *)
@@ -344,6 +347,7 @@ static void otx2_get_pauseparam(struct net_device *netdev,
pause->rx_pause = rsp->rx_pause;
pause->tx_pause = rsp->tx_pause;
}
+ mutex_unlock(&pfvf->mbox.lock);
}
static int otx2_set_pauseparam(struct net_device *netdev,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index ba95ac913274..0c17ebdda148 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -566,7 +566,9 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
TYPE_PFVF);
- vfs -= 64;
+ if (intr)
+ trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
+ vfs = 64;
}
intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
@@ -574,7 +576,8 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
- trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
+ if (intr)
+ trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
return IRQ_HANDLED;
}
@@ -1685,6 +1688,14 @@ static void otx2_do_set_rx_mode(struct otx2_nic *pf)
mutex_unlock(&pf->mbox.lock);
}
+static void otx2_set_irq_coalesce(struct otx2_nic *pfvf)
+{
+ int cint;
+
+ for (cint = 0; cint < pfvf->hw.cint_cnt; cint++)
+ otx2_config_irq_coalescing(pfvf, cint);
+}
+
static void otx2_dim_work(struct work_struct *w)
{
struct dim_cq_moder cur_moder;
@@ -1700,6 +1711,7 @@ static void otx2_dim_work(struct work_struct *w)
CQ_TIMER_THRESH_MAX : cur_moder.usec;
pfvf->hw.cq_ecount_wait = (cur_moder.pkts > NAPI_POLL_WEIGHT) ?
NAPI_POLL_WEIGHT : cur_moder.pkts;
+ otx2_set_irq_coalesce(pfvf);
dim->state = DIM_START_MEASURE;
}
@@ -1870,6 +1882,8 @@ int otx2_open(struct net_device *netdev)
if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
otx2_dmacflt_reinstall_flows(pf);
+ otx2_tc_apply_ingress_police_rules(pf);
+
err = otx2_rxtx_enable(pf, true);
/* If a mbox communication error happens at this point then interface
* will end up in a state such that it is in down state but hardware
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 8a5e3987a482..db1e0e0e812d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -47,6 +47,9 @@ struct otx2_tc_flow {
bool is_act_police;
u32 prio;
struct npc_install_flow_req req;
+ u64 rate;
+ u32 burst;
+ bool is_pps;
};
static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
@@ -284,21 +287,10 @@ static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
return err;
}
-static int otx2_tc_act_set_police(struct otx2_nic *nic,
- struct otx2_tc_flow *node,
- struct flow_cls_offload *f,
- u64 rate, u32 burst, u32 mark,
- struct npc_install_flow_req *req, bool pps)
+static int otx2_tc_act_set_hw_police(struct otx2_nic *nic,
+ struct otx2_tc_flow *node)
{
- struct netlink_ext_ack *extack = f->common.extack;
- struct otx2_hw *hw = &nic->hw;
- int rq_idx, rc;
-
- rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
- if (rq_idx >= hw->rx_queues) {
- NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
- return -EINVAL;
- }
+ int rc;
mutex_lock(&nic->mbox.lock);
@@ -308,23 +300,17 @@ static int otx2_tc_act_set_police(struct otx2_nic *nic,
return rc;
}
- rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
+ rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile,
+ node->burst, node->rate, node->is_pps);
if (rc)
goto free_leaf;
- rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
+ rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true);
if (rc)
goto free_leaf;
mutex_unlock(&nic->mbox.lock);
- req->match_id = mark & 0xFFFFULL;
- req->index = rq_idx;
- req->op = NIX_RX_ACTIONOP_UCAST;
- set_bit(rq_idx, &nic->rq_bmap);
- node->is_act_police = true;
- node->rq = rq_idx;
-
return 0;
free_leaf:
@@ -336,6 +322,39 @@ free_leaf:
return rc;
}
+static int otx2_tc_act_set_police(struct otx2_nic *nic,
+ struct otx2_tc_flow *node,
+ struct flow_cls_offload *f,
+ u64 rate, u32 burst, u32 mark,
+ struct npc_install_flow_req *req, bool pps)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct otx2_hw *hw = &nic->hw;
+ int rq_idx, rc;
+
+ rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
+ if (rq_idx >= hw->rx_queues) {
+ NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
+ return -EINVAL;
+ }
+
+ req->match_id = mark & 0xFFFFULL;
+ req->index = rq_idx;
+ req->op = NIX_RX_ACTIONOP_UCAST;
+
+ node->is_act_police = true;
+ node->rq = rq_idx;
+ node->burst = burst;
+ node->rate = rate;
+ node->is_pps = pps;
+
+ rc = otx2_tc_act_set_hw_police(nic, node);
+ if (!rc)
+ set_bit(rq_idx, &nic->rq_bmap);
+
+ return rc;
+}
+
static int otx2_tc_parse_actions(struct otx2_nic *nic,
struct flow_action *flow_action,
struct npc_install_flow_req *req,
@@ -1044,6 +1063,11 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
}
if (flow_node->is_act_police) {
+ __clear_bit(flow_node->rq, &nic->rq_bmap);
+
+ if (nic->flags & OTX2_FLAG_INTF_DOWN)
+ goto free_mcam_flow;
+
mutex_lock(&nic->mbox.lock);
err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
@@ -1059,11 +1083,10 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
"Unable to free leaf bandwidth profile(%d)\n",
flow_node->leaf_profile);
- __clear_bit(flow_node->rq, &nic->rq_bmap);
-
mutex_unlock(&nic->mbox.lock);
}
+free_mcam_flow:
otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
kfree_rcu(flow_node, rcu);
@@ -1083,6 +1106,11 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
return -ENOMEM;
+ if (nic->flags & OTX2_FLAG_INTF_DOWN) {
+ NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
+ return -EINVAL;
+ }
+
if (flow_cfg->nr_flows == flow_cfg->max_flows) {
NL_SET_ERR_MSG_MOD(extack,
"Free MCAM entry not available to add the flow");
@@ -1442,3 +1470,45 @@ void otx2_shutdown_tc(struct otx2_nic *nic)
otx2_destroy_tc_flow_list(nic);
}
EXPORT_SYMBOL(otx2_shutdown_tc);
+
+static void otx2_tc_config_ingress_rule(struct otx2_nic *nic,
+ struct otx2_tc_flow *node)
+{
+ struct npc_install_flow_req *req;
+
+ if (otx2_tc_act_set_hw_police(nic, node))
+ return;
+
+ mutex_lock(&nic->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
+ if (!req)
+ goto err;
+
+ memcpy(req, &node->req, sizeof(struct npc_install_flow_req));
+
+ if (otx2_sync_mbox_msg(&nic->mbox))
+ netdev_err(nic->netdev,
+ "Failed to install MCAM flow entry for ingress rule");
+err:
+ mutex_unlock(&nic->mbox.lock);
+}
+
+void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic)
+{
+ struct otx2_flow_config *flow_cfg = nic->flow_cfg;
+ struct otx2_tc_flow *node;
+
+ /* If any ingress policer rules exist for the interface, apply
+ * them. Ingress policer rules depend on bandwidth profiles linked
+ * to the receive queues. Since no receive queues exist while the
+ * interface is down, ingress policer rules are stored and then
+ * configured in hardware after all receive queues are allocated
+ * in otx2_open.
+ */
+ list_for_each_entry(node, &flow_cfg->flow_list_tc, list) {
+ if (node->is_act_police)
+ otx2_tc_config_ingress_rule(nic, node);
+ }
+}
+EXPORT_SYMBOL(otx2_tc_apply_ingress_police_rules);
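Editor's note: otx2_tc_apply_ingress_police_rules() implements a store-now/program-later split: rule parameters saved at add time are replayed into hardware from the open path. A small sketch of that pattern (types and values are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

/* Minimal model of "store at add time, program hardware at open time" */
struct rule {
        bool is_police;
        unsigned long long rate;        /* parameters saved when the rule was added */
        unsigned int burst;
        struct rule *next;
};

static void program_hw(const struct rule *r)
{
        /* stand-in for the hw-police setup + MCAM install */
        printf("programming rule: rate=%llu burst=%u\n", r->rate, r->burst);
}

static void apply_rules_on_open(struct rule *head)
{
        for (struct rule *r = head; r; r = r->next)
                if (r->is_police)
                        program_hw(r);
}

int main(void)
{
        struct rule b = { true, 5000000ULL, 1024, NULL };
        struct rule a = { true, 10000000ULL, 2048, &b };

        apply_rules_on_open(&a);        /* called from the open() path */
        return 0;
}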
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 6ee15f3c25ed..4d519ea833b2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -512,11 +512,18 @@ static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_p
{
struct dim_sample dim_sample;
u64 rx_frames, rx_bytes;
+ u64 tx_frames, tx_bytes;
rx_frames = OTX2_GET_RX_STATS(RX_BCAST) + OTX2_GET_RX_STATS(RX_MCAST) +
OTX2_GET_RX_STATS(RX_UCAST);
rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
- dim_update_sample(pfvf->napi_events, rx_frames, rx_bytes, &dim_sample);
+ tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
+ tx_frames = OTX2_GET_TX_STATS(TX_UCAST);
+
+ dim_update_sample(pfvf->napi_events,
+ rx_frames + tx_frames,
+ rx_bytes + tx_bytes,
+ &dim_sample);
net_dim(&cq_poll->dim, dim_sample);
}
@@ -558,16 +565,9 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
return workdone;
- /* Check for adaptive interrupt coalesce */
- if (workdone != 0 &&
- ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
- OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) {
- /* Adjust irq coalese using net_dim */
+ /* Adjust irq coalescing using net_dim */
+ if (pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED)
otx2_adjust_adaptive_coalese(pfvf, cq_poll);
- /* Update irq coalescing */
- for (i = 0; i < pfvf->hw.cint_cnt; i++)
- otx2_config_irq_coalescing(pfvf, i);
- }
if (unlikely(!filled_cnt)) {
struct refill_work *work;
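Editor's note: the dim_update_sample() change above feeds the moderation algorithm combined RX+TX activity instead of RX alone, so TX-heavy workloads also steer the interrupt rate. A trivial model of the summed sample (the helper is a toy stand-in and the counters are made up):

#include <stdint.h>
#include <stdio.h>

struct sample { uint64_t pkts, bytes; };

/* toy stand-in for the kernel's dim_update_sample() */
static void dim_update_sample(uint64_t pkts, uint64_t bytes, struct sample *s)
{
        s->pkts = pkts;
        s->bytes = bytes;
}

int main(void)
{
        uint64_t rx_frames = 1000, rx_bytes = 1500000;  /* made-up counters */
        uint64_t tx_frames = 4000, tx_bytes = 6000000;
        struct sample s;

        /* feed RX + TX combined, as the patch does */
        dim_update_sample(rx_frames + tx_frames, rx_bytes + tx_bytes, &s);
        printf("sample: %llu pkts, %llu bytes\n",
               (unsigned long long)s.pkts, (unsigned long long)s.bytes);
        return 0;
}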
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 060a77f2265d..e522845c7c21 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -160,6 +160,18 @@ struct nfp_tun_mac_addr_offload {
u8 addr[ETH_ALEN];
};
+/**
+ * struct nfp_neigh_update_work - deferred update of neighbour information to the nfp
+ * @work: Work queue for writing neigh to the nfp
+ * @n: neighbour entry
+ * @app: Back pointer to app
+ */
+struct nfp_neigh_update_work {
+ struct work_struct work;
+ struct neighbour *n;
+ struct nfp_app *app;
+};
+
enum nfp_flower_mac_offload_cmd {
NFP_TUNNEL_MAC_OFFLOAD_ADD = 0,
NFP_TUNNEL_MAC_OFFLOAD_DEL = 1,
@@ -607,38 +619,30 @@ err:
nfp_flower_cmsg_warn(app, "Neighbour configuration failed.\n");
}
-static int
-nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
- void *ptr)
+static void
+nfp_tun_release_neigh_update_work(struct nfp_neigh_update_work *update_work)
{
- struct nfp_flower_priv *app_priv;
- struct netevent_redirect *redir;
- struct neighbour *n;
+ neigh_release(update_work->n);
+ kfree(update_work);
+}
+
+static void nfp_tun_neigh_update(struct work_struct *work)
+{
+ struct nfp_neigh_update_work *update_work;
struct nfp_app *app;
+ struct neighbour *n;
bool neigh_invalid;
int err;
- switch (event) {
- case NETEVENT_REDIRECT:
- redir = (struct netevent_redirect *)ptr;
- n = redir->neigh;
- break;
- case NETEVENT_NEIGH_UPDATE:
- n = (struct neighbour *)ptr;
- break;
- default:
- return NOTIFY_DONE;
- }
-
- neigh_invalid = !(n->nud_state & NUD_VALID) || n->dead;
-
- app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
- app = app_priv->app;
+ update_work = container_of(work, struct nfp_neigh_update_work, work);
+ app = update_work->app;
+ n = update_work->n;
if (!nfp_flower_get_port_id_from_netdev(app, n->dev))
- return NOTIFY_DONE;
+ goto out;
#if IS_ENABLED(CONFIG_INET)
+ neigh_invalid = !(n->nud_state & NUD_VALID) || n->dead;
if (n->tbl->family == AF_INET6) {
#if IS_ENABLED(CONFIG_IPV6)
struct flowi6 flow6 = {};
@@ -655,13 +659,11 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
dst = ip6_dst_lookup_flow(dev_net(n->dev), NULL,
&flow6, NULL);
if (IS_ERR(dst))
- return NOTIFY_DONE;
+ goto out;
dst_release(dst);
}
nfp_tun_write_neigh(n->dev, app, &flow6, n, true, false);
-#else
- return NOTIFY_DONE;
#endif /* CONFIG_IPV6 */
} else {
struct flowi4 flow4 = {};
@@ -678,17 +680,71 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
rt = ip_route_output_key(dev_net(n->dev), &flow4);
err = PTR_ERR_OR_ZERO(rt);
if (err)
- return NOTIFY_DONE;
+ goto out;
ip_rt_put(rt);
}
nfp_tun_write_neigh(n->dev, app, &flow4, n, false, false);
}
-#else
- return NOTIFY_DONE;
#endif /* CONFIG_INET */
+out:
+ nfp_tun_release_neigh_update_work(update_work);
+}
- return NOTIFY_OK;
+static struct nfp_neigh_update_work *
+nfp_tun_alloc_neigh_update_work(struct nfp_app *app, struct neighbour *n)
+{
+ struct nfp_neigh_update_work *update_work;
+
+ update_work = kzalloc(sizeof(*update_work), GFP_ATOMIC);
+ if (!update_work)
+ return NULL;
+
+ INIT_WORK(&update_work->work, nfp_tun_neigh_update);
+ neigh_hold(n);
+ update_work->n = n;
+ update_work->app = app;
+
+ return update_work;
+}
+
+static int
+nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct nfp_neigh_update_work *update_work;
+ struct nfp_flower_priv *app_priv;
+ struct netevent_redirect *redir;
+ struct neighbour *n;
+ struct nfp_app *app;
+
+ switch (event) {
+ case NETEVENT_REDIRECT:
+ redir = (struct netevent_redirect *)ptr;
+ n = redir->neigh;
+ break;
+ case NETEVENT_NEIGH_UPDATE:
+ n = (struct neighbour *)ptr;
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
+#else
+ if (n->tbl != &arp_tbl)
+#endif
+ return NOTIFY_DONE;
+
+ app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
+ app = app_priv->app;
+ update_work = nfp_tun_alloc_neigh_update_work(app, n);
+ if (!update_work)
+ return NOTIFY_DONE;
+
+ queue_work(system_highpri_wq, &update_work->work);
+
+ return NOTIFY_DONE;
}
void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
@@ -706,6 +762,7 @@ void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
if (!netdev)
goto fail_rcu_unlock;
+ dev_hold(netdev);
flow.daddr = payload->ipv4_addr;
flow.flowi4_proto = IPPROTO_UDP;
@@ -725,13 +782,16 @@ void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)
ip_rt_put(rt);
if (!n)
goto fail_rcu_unlock;
+ rcu_read_unlock();
+
nfp_tun_write_neigh(n->dev, app, &flow, n, false, true);
neigh_release(n);
- rcu_read_unlock();
+ dev_put(netdev);
return;
fail_rcu_unlock:
rcu_read_unlock();
+ dev_put(netdev);
nfp_flower_cmsg_warn(app, "Requested route not found.\n");
}
@@ -749,6 +809,7 @@ void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
if (!netdev)
goto fail_rcu_unlock;
+ dev_hold(netdev);
flow.daddr = payload->ipv6_addr;
flow.flowi6_proto = IPPROTO_UDP;
@@ -766,14 +827,16 @@ void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)
dst_release(dst);
if (!n)
goto fail_rcu_unlock;
+ rcu_read_unlock();
nfp_tun_write_neigh(n->dev, app, &flow, n, true, true);
neigh_release(n);
- rcu_read_unlock();
+ dev_put(netdev);
return;
fail_rcu_unlock:
rcu_read_unlock();
+ dev_put(netdev);
nfp_flower_cmsg_warn(app, "Requested IPv6 route not found.\n");
}
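Editor's note: the nfp rework above moves the route lookups out of the atomic notifier. The handler only takes a reference on the neighbour and queues a work item; the worker performs the sleeping lookups and drops the reference when done. A compilable pthread sketch of that hand-off (the join is there only to keep the example self-contained; queue_work() is asynchronous):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct neigh { int refcnt; int addr; };

struct update_work {
        pthread_t thread;
        struct neigh *n;
};

static void *update_worker(void *arg)
{
        struct update_work *w = arg;

        /* the sleeping route lookup now happens here, in process context */
        printf("resolving route for neigh %d\n", w->n->addr);
        w->n->refcnt--;                 /* neigh_release() */
        free(w);
        return NULL;
}

static int neigh_event(struct neigh *n)
{
        struct update_work *w = malloc(sizeof(*w));

        if (!w)
                return -1;              /* drop the event, NOTIFY_DONE */
        n->refcnt++;                    /* neigh_hold(): keep n alive for the worker */
        w->n = n;
        pthread_create(&w->thread, NULL, update_worker, w);
        pthread_join(w->thread, NULL);  /* real code returns immediately instead */
        return 0;
}

int main(void)
{
        struct neigh n = { .refcnt = 1, .addr = 42 };

        neigh_event(&n);
        printf("final refcnt=%d\n", n.refcnt);
        return 0;
}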
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 1dbc3cb50b1d..9b5463040075 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -223,7 +223,7 @@ struct ionic_desc_info {
void *cb_arg;
};
-#define IONIC_QUEUE_NAME_MAX_SZ 32
+#define IONIC_QUEUE_NAME_MAX_SZ 16
struct ionic_queue {
struct device *dev;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index edc14730ce88..bad919343180 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -49,24 +49,24 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif);
static void ionic_dim_work(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
+ struct ionic_intr_info *intr;
struct dim_cq_moder cur_moder;
struct ionic_qcq *qcq;
+ struct ionic_lif *lif;
u32 new_coal;
cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
qcq = container_of(dim, struct ionic_qcq, dim);
- new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec);
+ lif = qcq->q.lif;
+ new_coal = ionic_coal_usec_to_hw(lif->ionic, cur_moder.usec);
new_coal = new_coal ? new_coal : 1;
- if (qcq->intr.dim_coal_hw != new_coal) {
- unsigned int qi = qcq->cq.bound_q->index;
- struct ionic_lif *lif = qcq->q.lif;
-
- qcq->intr.dim_coal_hw = new_coal;
+ intr = &qcq->intr;
+ if (intr->dim_coal_hw != new_coal) {
+ intr->dim_coal_hw = new_coal;
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
- lif->rxqcqs[qi]->intr.index,
- qcq->intr.dim_coal_hw);
+ intr->index, intr->dim_coal_hw);
}
dim->state = DIM_START_MEASURE;
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 295366a85c63..bb787a52bc75 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -196,6 +196,7 @@ enum rtl_registers {
/* No threshold before first PCI xfer */
#define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
#define RX_EARLY_OFF (1 << 11)
+#define RX_PAUSE_SLOT_ON (1 << 11) /* 8125b and later */
#define RXCFG_DMA_SHIFT 8
/* Unlimited maximum PCI burst. */
#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
@@ -579,6 +580,7 @@ struct rtl8169_tc_offsets {
enum rtl_flag {
RTL_FLAG_TASK_ENABLED = 0,
RTL_FLAG_TASK_RESET_PENDING,
+ RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE,
RTL_FLAG_TASK_TX_TIMEOUT,
RTL_FLAG_MAX
};
@@ -2305,9 +2307,13 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_61:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
break;
+ case RTL_GIGA_MAC_VER_63:
+ RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST |
+ RX_PAUSE_SLOT_ON);
+ break;
default:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST);
break;
@@ -4582,6 +4588,8 @@ static void rtl_task(struct work_struct *work)
reset:
rtl_reset_work(tp);
netif_wake_queue(tp->dev);
+ } else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) {
+ rtl_reset_work(tp);
}
out_unlock:
rtnl_unlock();
@@ -4615,7 +4623,7 @@ static void r8169_phylink_handler(struct net_device *ndev)
} else {
/* In a few cases rx is broken after link-down otherwise */
if (rtl_is_8125(tp))
- rtl_reset_work(tp);
+ rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE);
pm_runtime_idle(d);
}
@@ -4691,7 +4699,7 @@ static int rtl8169_close(struct net_device *dev)
rtl8169_down(tp);
rtl8169_rx_clear(tp);
- cancel_work_sync(&tp->wk.work);
+ cancel_work(&tp->wk.work);
free_irq(tp->irq, tp);
@@ -4925,6 +4933,8 @@ static void rtl_remove_one(struct pci_dev *pdev)
if (pci_dev_run_wake(pdev))
pm_runtime_get_noresume(&pdev->dev);
+ cancel_work_sync(&tp->wk.work);
+
unregister_netdev(tp->dev);
if (tp->dash_type != RTL_DASH_NONE)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index c70cff80cc99..664eda4b5a11 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -515,6 +515,15 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
+ ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
+ ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
+ } else {
+ ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_RGMII, CXR35);
+ ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
+ CXR31_SEL_LINK0);
+ }
+
/* Receive frame limit set register */
ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);
@@ -537,14 +546,6 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
/* E-MAC interrupt enable register */
ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
-
- if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
- ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
- ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
- } else {
- ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
- CXR31_SEL_LINK0);
- }
}
static void ravb_emac_init_rcar(struct net_device *ndev)
@@ -1811,19 +1812,20 @@ static int ravb_open(struct net_device *ndev)
if (info->gptp)
ravb_ptp_init(ndev, priv->pdev);
- netif_tx_start_all_queues(ndev);
-
/* PHY control start */
error = ravb_phy_start(ndev);
if (error)
goto out_ptp_stop;
+ netif_tx_start_all_queues(ndev);
+
return 0;
out_ptp_stop:
/* Stop PTP Clock driver */
if (info->gptp)
ravb_ptp_stop(ndev);
+ ravb_stop_dma(ndev);
out_free_irq_mgmta:
if (!info->multi_irqs)
goto out_free_irq;
@@ -1874,6 +1876,12 @@ static void ravb_tx_timeout_work(struct work_struct *work)
struct net_device *ndev = priv->ndev;
int error;
+ if (!rtnl_trylock()) {
+ usleep_range(1000, 2000);
+ schedule_work(&priv->work);
+ return;
+ }
+
netif_tx_stop_all_queues(ndev);
/* Stop PTP Clock driver */
@@ -1907,7 +1915,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
*/
netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
__func__, error);
- return;
+ goto out_unlock;
}
ravb_emac_init(ndev);
@@ -1917,6 +1925,9 @@ out:
ravb_ptp_init(ndev, priv->pdev);
netif_tx_start_all_queues(ndev);
+
+out_unlock:
+ rtnl_unlock();
}
/* Packet transmit function for Ethernet AVB */
@@ -2645,9 +2656,14 @@ static int ravb_probe(struct platform_device *pdev)
ndev->features = info->net_features;
ndev->hw_features = info->net_hw_features;
- reset_control_deassert(rstc);
+ error = reset_control_deassert(rstc);
+ if (error)
+ goto out_free_netdev;
+
pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
+ error = pm_runtime_resume_and_get(&pdev->dev);
+ if (error < 0)
+ goto out_rpm_disable;
if (info->multi_irqs) {
if (info->err_mgmt_irqs)
@@ -2872,11 +2888,12 @@ out_disable_gptp_clk:
out_disable_refclk:
clk_disable_unprepare(priv->refclk);
out_release:
- free_netdev(ndev);
-
pm_runtime_put(&pdev->dev);
+out_rpm_disable:
pm_runtime_disable(&pdev->dev);
reset_control_assert(rstc);
+out_free_netdev:
+ free_netdev(ndev);
return error;
}
@@ -2886,22 +2903,26 @@ static void ravb_remove(struct platform_device *pdev)
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- /* Stop PTP Clock driver */
- if (info->ccc_gac)
- ravb_ptp_stop(ndev);
-
- clk_disable_unprepare(priv->gptp_clk);
- clk_disable_unprepare(priv->refclk);
-
- /* Set reset mode */
- ravb_write(ndev, CCC_OPC_RESET, CCC);
unregister_netdev(ndev);
if (info->nc_queues)
netif_napi_del(&priv->napi[RAVB_NC]);
netif_napi_del(&priv->napi[RAVB_BE]);
+
ravb_mdio_release(priv);
+
+ /* Stop PTP Clock driver */
+ if (info->ccc_gac)
+ ravb_ptp_stop(ndev);
+
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
+
+ /* Set reset mode */
+ ravb_write(ndev, CCC_OPC_RESET, CCC);
+
+ clk_disable_unprepare(priv->gptp_clk);
+ clk_disable_unprepare(priv->refclk);
+
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
reset_control_assert(priv->rstc);
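Editor's note: the ravb_tx_timeout_work() change uses trylock-plus-requeue so the worker never blocks on the RTNL while another RTNL holder may be flushing this same work item. A pthread sketch of the back-off shape (it spins inline here for brevity; the driver requeues itself and returns instead):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;    /* stands in for RTNL */

static void timeout_work(void)
{
        while (pthread_mutex_trylock(&big_lock) == EBUSY) {
                usleep(1000);           /* usleep_range(1000, 2000) */
                /* the driver instead does: schedule_work(&priv->work); return; */
        }
        printf("restarting the device under the lock\n");
        pthread_mutex_unlock(&big_lock);
}

int main(void)
{
        timeout_work();
        return 0;
}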
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 43a7795d6591..e77c6ff93d81 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -1504,8 +1504,8 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
{
struct rswitch_device *rdev = netdev_priv(ndev);
struct rswitch_gwca_queue *gq = rdev->tx_queue;
+ netdev_tx_t ret = NETDEV_TX_OK;
struct rswitch_ext_desc *desc;
- int ret = NETDEV_TX_OK;
dma_addr_t dma_addr;
if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
@@ -1517,10 +1517,8 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
return ret;
dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
- if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
- dev_kfree_skb_any(skb);
- return ret;
- }
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ goto err_kfree;
gq->skbs[gq->cur] = skb;
desc = &gq->tx_ring[gq->cur];
@@ -1533,10 +1531,8 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
struct rswitch_gwca_ts_info *ts_info;
ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
- if (!ts_info) {
- dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
- return -ENOMEM;
- }
+ if (!ts_info)
+ goto err_unmap;
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
rdev->ts_tag++;
@@ -1559,6 +1555,14 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
return ret;
+
+err_unmap:
+ dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
+
+err_kfree:
+ dev_kfree_skb_any(skb);
+
+ return ret;
}
static struct net_device_stats *rswitch_get_stats(struct net_device *ndev)
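Editor's note: the rswitch_start_xmit() rework converts ad-hoc failure cleanup into a single goto unwind: later failures undo earlier steps in reverse order, and every failure path frees the packet exactly once while still returning NETDEV_TX_OK so the stack does not retry. A sketch of that shape (heap allocations stand in for DMA mapping and timestamp bookkeeping):

#include <stdio.h>
#include <stdlib.h>

static int xmit(void *skb)
{
        void *mapping, *ts_info;

        mapping = malloc(16);           /* dma_map_single() stand-in */
        if (!mapping)
                goto err_kfree;

        ts_info = malloc(16);           /* timestamp bookkeeping stand-in */
        if (!ts_info)
                goto err_unmap;

        printf("descriptor queued\n");
        /* freed here for brevity; the driver keeps these until TX completion */
        free(ts_info);
        free(mapping);
        free(skb);
        return 0;                       /* NETDEV_TX_OK */

err_unmap:
        free(mapping);                  /* dma_unmap_single() */
err_kfree:
        free(skb);                      /* dev_kfree_skb_any() */
        return 0;                       /* still NETDEV_TX_OK: packet dropped */
}

int main(void)
{
        return xmit(malloc(64));
}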
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index e95d35f1e5a0..8fd167501fa0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -710,28 +710,22 @@ void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev,
}
}
-void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
+void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
+ u32 num_txq, u32 num_rxq,
bool enable)
{
u32 value;
- if (!enable) {
- value = readl(ioaddr + MAC_FPE_CTRL_STS);
-
- value &= ~EFPE;
-
- writel(value, ioaddr + MAC_FPE_CTRL_STS);
- return;
+ if (enable) {
+ cfg->fpe_csr = EFPE;
+ value = readl(ioaddr + GMAC_RXQ_CTRL1);
+ value &= ~GMAC_RXQCTRL_FPRQ;
+ value |= (num_rxq - 1) << GMAC_RXQCTRL_FPRQ_SHIFT;
+ writel(value, ioaddr + GMAC_RXQ_CTRL1);
+ } else {
+ cfg->fpe_csr = 0;
}
-
- value = readl(ioaddr + GMAC_RXQ_CTRL1);
- value &= ~GMAC_RXQCTRL_FPRQ;
- value |= (num_rxq - 1) << GMAC_RXQCTRL_FPRQ_SHIFT;
- writel(value, ioaddr + GMAC_RXQ_CTRL1);
-
- value = readl(ioaddr + MAC_FPE_CTRL_STS);
- value |= EFPE;
- writel(value, ioaddr + MAC_FPE_CTRL_STS);
+ writel(cfg->fpe_csr, ioaddr + MAC_FPE_CTRL_STS);
}
int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
@@ -741,6 +735,9 @@ int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
status = FPE_EVENT_UNKNOWN;
+ /* Reads from the MAC_FPE_CTRL_STS register should only be performed
+ * here, since the status flags of MAC_FPE_CTRL_STS are "clear on read"
+ */
value = readl(ioaddr + MAC_FPE_CTRL_STS);
if (value & TRSP) {
@@ -766,19 +763,15 @@ int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
return status;
}
-void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, enum stmmac_mpacket_type type)
+void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
+ enum stmmac_mpacket_type type)
{
- u32 value;
+ u32 value = cfg->fpe_csr;
- value = readl(ioaddr + MAC_FPE_CTRL_STS);
-
- if (type == MPACKET_VERIFY) {
- value &= ~SRSP;
+ if (type == MPACKET_VERIFY)
value |= SVER;
- } else {
- value &= ~SVER;
+ else if (type == MPACKET_RESPONSE)
value |= SRSP;
- }
writel(value, ioaddr + MAC_FPE_CTRL_STS);
}
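Editor's note: dwmac5_fpe_send_mpacket() now composes the register value from a software shadow (fpe_csr) instead of read-modify-write, because the status bits of MAC_FPE_CTRL_STS are clear-on-read and a stray readl() would consume events meant for the IRQ handler. A minimal sketch of the shadow-register idiom (register layout and bit values are illustrative):

#include <stdint.h>
#include <stdio.h>

struct fpe_cfg { uint32_t fpe_csr; };

#define EFPE 0x1u
#define SVER 0x2u

static uint32_t hw_reg;                 /* stand-in for MAC_FPE_CTRL_STS */

static void fpe_configure(struct fpe_cfg *cfg, int enable)
{
        cfg->fpe_csr = enable ? EFPE : 0;
        hw_reg = cfg->fpe_csr;          /* writel() */
}

static void fpe_send_mpacket(struct fpe_cfg *cfg)
{
        /* compose from the shadow; never readl() the live register here */
        hw_reg = cfg->fpe_csr | SVER;
}

int main(void)
{
        struct fpe_cfg cfg = { 0 };

        fpe_configure(&cfg, 1);
        fpe_send_mpacket(&cfg);
        printf("reg=0x%x\n", hw_reg);   /* EFPE | SVER */
        return 0;
}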
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
index 53c138d0ff48..34e620790eb3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -153,9 +153,11 @@ int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
unsigned int ptp_rate);
void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev,
struct stmmac_extra_stats *x, u32 txqcnt);
-void dwmac5_fpe_configure(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
+void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
+ u32 num_txq, u32 num_rxq,
bool enable);
void dwmac5_fpe_send_mpacket(void __iomem *ioaddr,
+ struct stmmac_fpe_cfg *cfg,
enum stmmac_mpacket_type type);
int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 453e88b75be0..a74e71db79f9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -1484,7 +1484,8 @@ static int dwxgmac3_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
return 0;
}
-static void dwxgmac3_fpe_configure(void __iomem *ioaddr, u32 num_txq,
+static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
+ u32 num_txq,
u32 num_rxq, bool enable)
{
u32 value;
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index b95d3e137813..68aa2d5ca6e5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -412,9 +412,11 @@ struct stmmac_ops {
unsigned int ptp_rate);
void (*est_irq_status)(void __iomem *ioaddr, struct net_device *dev,
struct stmmac_extra_stats *x, u32 txqcnt);
- void (*fpe_configure)(void __iomem *ioaddr, u32 num_txq, u32 num_rxq,
+ void (*fpe_configure)(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
+ u32 num_txq, u32 num_rxq,
bool enable);
void (*fpe_send_mpacket)(void __iomem *ioaddr,
+ struct stmmac_fpe_cfg *cfg,
enum stmmac_mpacket_type type);
int (*fpe_irq_status)(void __iomem *ioaddr, struct net_device *dev);
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index ea4910ae0921..6a7c1d325c46 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -177,8 +177,10 @@
#define MMC_XGMAC_RX_DISCARD_OCT_GB 0x1b4
#define MMC_XGMAC_RX_ALIGN_ERR_PKT 0x1bc
+#define MMC_XGMAC_TX_FPE_INTR_MASK 0x204
#define MMC_XGMAC_TX_FPE_FRAG 0x208
#define MMC_XGMAC_TX_HOLD_REQ 0x20c
+#define MMC_XGMAC_RX_FPE_INTR_MASK 0x224
#define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR 0x228
#define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c
#define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230
@@ -352,6 +354,8 @@ static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr)
{
writel(0x0, mmcaddr + MMC_RX_INTR_MASK);
writel(0x0, mmcaddr + MMC_TX_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_TX_FPE_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_FPE_INTR_MASK);
writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_IPC_INTR_MASK);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 2afb2bd25977..37e64283f910 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -964,7 +964,8 @@ static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
bool *hs_enable = &fpe_cfg->hs_enable;
if (is_up && *hs_enable) {
- stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
+ MPACKET_VERIFY);
} else {
*lo_state = FPE_STATE_OFF;
*lp_state = FPE_STATE_OFF;
@@ -5839,6 +5840,7 @@ static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
/* If the user has requested FPE enable, respond quickly */
if (*hs_enable)
stmmac_fpe_send_mpacket(priv, priv->ioaddr,
+ fpe_cfg,
MPACKET_RESPONSE);
}
@@ -7263,6 +7265,7 @@ static void stmmac_fpe_lp_task(struct work_struct *work)
if (*lo_state == FPE_STATE_ENTERING_ON &&
*lp_state == FPE_STATE_ENTERING_ON) {
stmmac_fpe_configure(priv, priv->ioaddr,
+ fpe_cfg,
priv->plat->tx_queues_to_use,
priv->plat->rx_queues_to_use,
*enable);
@@ -7281,6 +7284,7 @@ static void stmmac_fpe_lp_task(struct work_struct *work)
netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
*lo_state, *lp_state);
stmmac_fpe_send_mpacket(priv, priv->ioaddr,
+ fpe_cfg,
MPACKET_VERIFY);
}
/* Sleep then retry */
@@ -7295,6 +7299,7 @@ void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
if (priv->plat->fpe_cfg->hs_enable != enable) {
if (enable) {
stmmac_fpe_send_mpacket(priv, priv->ioaddr,
+ priv->plat->fpe_cfg,
MPACKET_VERIFY);
} else {
priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
@@ -7755,6 +7760,7 @@ int stmmac_suspend(struct device *dev)
if (priv->dma_cap.fpesel) {
/* Disable FPE */
stmmac_fpe_configure(priv, priv->ioaddr,
+ priv->plat->fpe_cfg,
priv->plat->tx_queues_to_use,
priv->plat->rx_queues_to_use, false);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index ac41ef4cbd2f..6ad3e0a11936 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -1079,6 +1079,7 @@ disable:
priv->plat->fpe_cfg->enable = false;
stmmac_fpe_configure(priv, priv->ioaddr,
+ priv->plat->fpe_cfg,
priv->plat->tx_queues_to_use,
priv->plat->rx_queues_to_use,
false);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 2823861e5a92..a5a50b5a8816 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -1972,11 +1972,11 @@ void wx_reset_interrupt_capability(struct wx *wx)
if (!pdev->msi_enabled && !pdev->msix_enabled)
return;
- pci_free_irq_vectors(wx->pdev);
if (pdev->msix_enabled) {
kfree(wx->msix_entries);
wx->msix_entries = NULL;
}
+ pci_free_irq_vectors(wx->pdev);
}
EXPORT_SYMBOL(wx_reset_interrupt_capability);
diff --git a/drivers/net/hyperv/Kconfig b/drivers/net/hyperv/Kconfig
index ca7bf7f897d3..c8cbd85adcf9 100644
--- a/drivers/net/hyperv/Kconfig
+++ b/drivers/net/hyperv/Kconfig
@@ -3,5 +3,6 @@ config HYPERV_NET
tristate "Microsoft Hyper-V virtual network driver"
depends on HYPERV
select UCS2_STRING
+ select NLS
help
Select this option to enable the Hyper-V virtual network driver.
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index f60eb97e3a62..608953d4f98d 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -93,7 +93,7 @@ static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded)
{
struct nsim_bpf_bound_prog *state;
- if (!prog || !prog->aux->offload)
+ if (!prog || !bpf_prog_is_offloaded(prog->aux))
return;
state = prog->aux->offload->dev_priv;
@@ -311,7 +311,7 @@ nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
if (!bpf->prog)
return 0;
- if (!bpf->prog->aux->offload) {
+ if (!bpf_prog_is_offloaded(bpf->prog->aux)) {
NSIM_EA(bpf->extack, "xdpoffload of non-bound program");
return -EINVAL;
}
diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
index 97bd6705c241..39171380ccf2 100644
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -851,6 +851,12 @@ static int netkit_change_link(struct net_device *dev, struct nlattr *tb[],
return -EACCES;
}
+ if (data[IFLA_NETKIT_PEER_INFO]) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_PEER_INFO],
+ "netkit peer info cannot be changed after device creation");
+ return -EINVAL;
+ }
+
if (data[IFLA_NETKIT_POLICY]) {
attr = data[IFLA_NETKIT_POLICY];
policy = nla_get_u32(attr);
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 2c5c1e91ded6..9bf2140fd0a1 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -3000,6 +3000,8 @@ static void rtl8152_nic_reset(struct r8152 *tp)
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, CR_RST);
for (i = 0; i < 1000; i++) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ break;
if (!(ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR) & CR_RST))
break;
usleep_range(100, 400);
@@ -3329,6 +3331,8 @@ static void rtl_disable(struct r8152 *tp)
rxdy_gated_en(tp, true);
for (i = 0; i < 1000; i++) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ break;
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
if ((ocp_data & FIFO_EMPTY) == FIFO_EMPTY)
break;
@@ -3336,6 +3340,8 @@ static void rtl_disable(struct r8152 *tp)
}
for (i = 0; i < 1000; i++) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ break;
if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0) & TCR0_TX_EMPTY)
break;
usleep_range(1000, 2000);
@@ -5499,6 +5505,8 @@ static void wait_oob_link_list_ready(struct r8152 *tp)
int i;
for (i = 0; i < 1000; i++) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ break;
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
if (ocp_data & LINK_LIST_READY)
break;
@@ -5513,6 +5521,8 @@ static void r8156b_wait_loading_flash(struct r8152 *tp)
int i;
for (i = 0; i < 100; i++) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ break;
if (ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL) & GPHY_PATCH_DONE)
break;
usleep_range(1000, 2000);
@@ -5635,6 +5645,8 @@ static int r8153_pre_firmware_1(struct r8152 *tp)
for (i = 0; i < 104; i++) {
u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_WDT1_CTRL);
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return -ENODEV;
if (!(ocp_data & WTD1_EN))
break;
usleep_range(1000, 2000);
@@ -5791,6 +5803,8 @@ static void r8153_aldps_en(struct r8152 *tp, bool enable)
data &= ~EN_ALDPS;
ocp_reg_write(tp, OCP_POWER_CFG, data);
for (i = 0; i < 20; i++) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
usleep_range(1000, 2000);
if (ocp_read_word(tp, MCU_TYPE_PLA, 0xe000) & 0x0100)
break;
@@ -8397,6 +8411,8 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
struct r8152 *tp = usb_get_intfdata(intf);
struct net_device *netdev;
+ rtnl_lock();
+
if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags))
return 0;
@@ -8428,20 +8444,17 @@ static int rtl8152_post_reset(struct usb_interface *intf)
struct sockaddr sa;
if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags))
- return 0;
+ goto exit;
rtl_set_accessible(tp);
/* reset the MAC address in case of policy change */
- if (determine_ethernet_addr(tp, &sa) >= 0) {
- rtnl_lock();
+ if (determine_ethernet_addr(tp, &sa) >= 0)
dev_set_mac_address (tp->netdev, &sa, NULL);
- rtnl_unlock();
- }
netdev = tp->netdev;
if (!netif_running(netdev))
- return 0;
+ goto exit;
set_bit(WORK_ENABLE, &tp->flags);
if (netif_carrier_ok(netdev)) {
@@ -8460,6 +8473,8 @@ static int rtl8152_post_reset(struct usb_interface *intf)
if (!list_empty(&tp->rx_done))
napi_schedule(&tp->napi);
+exit:
+ rtnl_unlock();
return 0;
}
@@ -10034,6 +10049,7 @@ static const struct usb_device_id rtl8152_table[] = {
{ USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff) },
{ USB_DEVICE(VENDOR_ID_TPLINK, 0x0601) },
{ USB_DEVICE(VENDOR_ID_DLINK, 0xb301) },
+ { USB_DEVICE(VENDOR_ID_ASUS, 0x1976) },
{}
};
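Editor's note: each of the r8152 polling loops gains the same early exit: once RTL8152_INACCESSIBLE is set (e.g. the USB adapter was unplugged), there is no point finishing the timeout on dead register reads. A sketch of the bounded-poll shape (the flag and status bit are stand-ins):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool device_inaccessible;
static unsigned int hw_status;

static void wait_link_list_ready(void)
{
        for (int i = 0; i < 1000; i++) {
                if (device_inaccessible)
                        break;          /* test_bit(RTL8152_INACCESSIBLE, ...) */
                if (hw_status & 0x2)    /* LINK_LIST_READY stand-in */
                        break;
                usleep(1000);
        }
}

int main(void)
{
        device_inaccessible = true;     /* simulate surprise removal */
        wait_link_list_ready();         /* returns immediately */
        printf("done\n");
        return 0;
}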
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 57efb3454c57..977861c46b1f 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -790,7 +790,8 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
skb_add_rx_frag(nskb, i, page, page_offset, size,
truesize);
- if (skb_copy_bits(skb, off, page_address(page),
+ if (skb_copy_bits(skb, off,
+ page_address(page) + page_offset,
size)) {
consume_skb(nskb);
goto drop;
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index e150d82eddb6..0c47be06c153 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -57,8 +57,7 @@ config ATH9K_AHB
config ATH9K_DEBUGFS
bool "Atheros ath9k debugging"
- depends on ATH9K && DEBUG_FS
- select MAC80211_DEBUGFS
+ depends on ATH9K && DEBUG_FS && MAC80211_DEBUGFS
select ATH9K_COMMON_DEBUG
help
Say Y, if you need access to ath9k's statistics for
@@ -70,7 +69,6 @@ config ATH9K_DEBUGFS
config ATH9K_STATION_STATISTICS
bool "Detailed station statistics"
depends on ATH9K && ATH9K_DEBUGFS && DEBUG_FS
- select MAC80211_DEBUGFS
default n
help
This option enables detailed statistics for association stations.
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
index ca5e4fbcf8ce..6af606e5da65 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
@@ -707,8 +707,10 @@ int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
rcu_dereference_protected(mvm_sta->link[link_id],
lockdep_is_held(&mvm->mutex));
- if (WARN_ON(!link_conf || !mvm_link_sta))
+ if (WARN_ON(!link_conf || !mvm_link_sta)) {
+ ret = -EINVAL;
goto err;
+ }
ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta, link_conf,
mvm_link_sta);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index 63f3d4a5c9aa..2cc2d2788f83 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -375,6 +375,7 @@ static int mt7921_load_clc(struct mt792x_dev *dev, const char *fw_name)
int ret, i, len, offset = 0;
u8 *clc_base = NULL, hw_encap = 0;
+ dev->phy.clc_chan_conf = 0xff;
if (mt7921_disable_clc ||
mt76_is_usb(&dev->mt76))
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
index 15c2fb0bcb1b..aa918b9b0469 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
@@ -14,7 +14,7 @@
static void
mt7925_init_he_caps(struct mt792x_phy *phy, enum nl80211_band band,
struct ieee80211_sband_iftype_data *data,
- enum nl80211_iftype iftype)
+ enum nl80211_iftype iftype)
{
struct ieee80211_sta_he_cap *he_cap = &data->he_cap;
struct ieee80211_he_cap_elem *he_cap_elem = &he_cap->he_cap_elem;
@@ -53,7 +53,7 @@ mt7925_init_he_caps(struct mt792x_phy *phy, enum nl80211_band band,
IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
- switch (i) {
+ switch (iftype) {
case NL80211_IFTYPE_AP:
he_cap_elem->mac_cap_info[2] |=
IEEE80211_HE_MAC_CAP2_BSR;
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 8fe2dd619e80..b309c8be720f 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -107,11 +107,12 @@ config NVME_TCP_TLS
If unsure, say N.
config NVME_HOST_AUTH
- bool "NVM Express over Fabrics In-Band Authentication"
+ bool "NVMe over Fabrics In-Band Authentication in host side"
depends on NVME_CORE
select NVME_AUTH
help
- This provides support for NVMe over Fabrics In-Band Authentication.
+ This provides support for NVMe over Fabrics In-Band Authentication on
+ the host side.
If unsure, say N.
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 46a4c9c5ea96..8ebdfd623e0f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -131,7 +131,7 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl)
/*
* Only new queue scan work when admin and IO queues are both alive
*/
- if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
+ if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE && ctrl->tagset)
queue_work(nvme_wq, &ctrl->scan_work);
}
@@ -143,7 +143,7 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl)
*/
int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
{
- if (ctrl->state != NVME_CTRL_RESETTING)
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_RESETTING)
return -EBUSY;
if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
return -EBUSY;
@@ -156,7 +156,7 @@ static void nvme_failfast_work(struct work_struct *work)
struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
struct nvme_ctrl, failfast_work);
- if (ctrl->state != NVME_CTRL_CONNECTING)
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_CONNECTING)
return;
set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
@@ -200,7 +200,7 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
ret = nvme_reset_ctrl(ctrl);
if (!ret) {
flush_work(&ctrl->reset_work);
- if (ctrl->state != NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
ret = -ENETRESET;
}
@@ -499,7 +499,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
spin_lock_irqsave(&ctrl->lock, flags);
- old_state = ctrl->state;
+ old_state = nvme_ctrl_state(ctrl);
switch (new_state) {
case NVME_CTRL_LIVE:
switch (old_state) {
@@ -567,7 +567,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
}
if (changed) {
- ctrl->state = new_state;
+ WRITE_ONCE(ctrl->state, new_state);
wake_up_all(&ctrl->state_wq);
}
@@ -575,11 +575,11 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
if (!changed)
return false;
- if (ctrl->state == NVME_CTRL_LIVE) {
+ if (new_state == NVME_CTRL_LIVE) {
if (old_state == NVME_CTRL_CONNECTING)
nvme_stop_failfast_work(ctrl);
nvme_kick_requeue_lists(ctrl);
- } else if (ctrl->state == NVME_CTRL_CONNECTING &&
+ } else if (new_state == NVME_CTRL_CONNECTING &&
old_state == NVME_CTRL_RESETTING) {
nvme_start_failfast_work(ctrl);
}
@@ -592,7 +592,7 @@ EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
*/
static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
- switch (ctrl->state) {
+ switch (nvme_ctrl_state(ctrl)) {
case NVME_CTRL_NEW:
case NVME_CTRL_LIVE:
case NVME_CTRL_RESETTING:
@@ -617,7 +617,7 @@ bool nvme_wait_reset(struct nvme_ctrl *ctrl)
wait_event(ctrl->state_wq,
nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
nvme_state_terminal(ctrl));
- return ctrl->state == NVME_CTRL_RESETTING;
+ return nvme_ctrl_state(ctrl) == NVME_CTRL_RESETTING;
}
EXPORT_SYMBOL_GPL(nvme_wait_reset);
@@ -704,9 +704,11 @@ EXPORT_SYMBOL_GPL(nvme_init_request);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
struct request *rq)
{
- if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
- ctrl->state != NVME_CTRL_DELETING &&
- ctrl->state != NVME_CTRL_DEAD &&
+ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+ if (state != NVME_CTRL_DELETING_NOIO &&
+ state != NVME_CTRL_DELETING &&
+ state != NVME_CTRL_DEAD &&
!test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
return BLK_STS_RESOURCE;
@@ -736,7 +738,7 @@ bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
* command, which is required to set the queue live in the
* appropriate states.
*/
- switch (ctrl->state) {
+ switch (nvme_ctrl_state(ctrl)) {
case NVME_CTRL_CONNECTING:
if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
(req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
@@ -1192,8 +1194,16 @@ static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
{
- queue_delayed_work(nvme_wq, &ctrl->ka_work,
- nvme_keep_alive_work_period(ctrl));
+ unsigned long now = jiffies;
+ unsigned long delay = nvme_keep_alive_work_period(ctrl);
+ unsigned long ka_next_check_tm = ctrl->ka_last_check_time + delay;
+
+ if (time_after(now, ka_next_check_tm))
+ delay = 0;
+ else
+ delay = ka_next_check_tm - now;
+
+ queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
}
static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
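Editor's note: nvme_queue_keep_alive_work() now schedules relative to the last completed keep-alive check and clamps the delay at zero when that deadline has already passed, so a slow keep-alive command cannot stretch the effective interval. A plain-integer model of the arithmetic (the real code uses jiffies and time_after()):

#include <stdio.h>

static unsigned long next_ka_delay(unsigned long now,
                                   unsigned long last_check,
                                   unsigned long period)
{
        unsigned long deadline = last_check + period;

        /* time_after(now, deadline) -> fire immediately */
        return (now >= deadline) ? 0 : deadline - now;
}

int main(void)
{
        /* command took longer than the period: fire immediately */
        printf("delay=%lu\n", next_ka_delay(1100, 1000, 50));
        /* command completed quickly: wait out the remaining period */
        printf("delay=%lu\n", next_ka_delay(1010, 1000, 50));
        return 0;
}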
@@ -1479,7 +1489,8 @@ static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
if (id->ncap == 0) {
/* namespace not allocated or attached */
info->is_removed = true;
- return -ENODEV;
+ ret = -ENODEV;
+ goto error;
}
info->anagrpid = id->anagrpid;
@@ -1497,8 +1508,10 @@ static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
memcpy(ids->nguid, id->nguid, sizeof(ids->nguid));
}
+
+error:
kfree(id);
- return 0;
+ return ret;
}
static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
@@ -1890,9 +1903,10 @@ static void nvme_update_disk_info(struct gendisk *disk,
/*
* The block layer can't support LBA sizes larger than the page size
- * yet, so catch this early and don't allow block I/O.
+ * or smaller than a sector size yet, so catch this early and don't
+ * allow block I/O.
*/
- if (ns->lba_shift > PAGE_SHIFT) {
+ if (ns->lba_shift > PAGE_SHIFT || ns->lba_shift < SECTOR_SHIFT) {
capacity = 0;
bs = (1 << 9);
}
@@ -2029,6 +2043,13 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
if (ret)
return ret;
+ if (id->ncap == 0) {
+ /* namespace not allocated or attached */
+ info->is_removed = true;
+ ret = -ENODEV;
+ goto error;
+ }
+
blk_mq_freeze_queue(ns->disk->queue);
lbaf = nvme_lbaf_index(id->flbas);
ns->lba_shift = id->lbaf[lbaf].ds;
@@ -2090,6 +2111,8 @@ out:
set_bit(NVME_NS_READY, &ns->flags);
ret = 0;
}
+
+error:
kfree(id);
return ret;
}
@@ -2529,7 +2552,7 @@ static void nvme_set_latency_tolerance(struct device *dev, s32 val)
if (ctrl->ps_max_latency_us != latency) {
ctrl->ps_max_latency_us = latency;
- if (ctrl->state == NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE)
nvme_configure_apst(ctrl);
}
}
@@ -3217,7 +3240,7 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
struct nvme_ctrl *ctrl =
container_of(inode->i_cdev, struct nvme_ctrl, cdev);
- switch (ctrl->state) {
+ switch (nvme_ctrl_state(ctrl)) {
case NVME_CTRL_LIVE:
break;
default:
@@ -3639,6 +3662,14 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
goto out_unlink_ns;
down_write(&ctrl->namespaces_rwsem);
+ /*
+ * Ensure that no namespaces are added to the ctrl list after the queues
+ * are frozen, thereby avoiding a deadlock between scan and reset.
+ */
+ if (test_bit(NVME_CTRL_FROZEN, &ctrl->flags)) {
+ up_write(&ctrl->namespaces_rwsem);
+ goto out_unlink_ns;
+ }
nvme_ns_add_to_ctrl_list(ns);
up_write(&ctrl->namespaces_rwsem);
nvme_get_ctrl(ctrl);
@@ -3903,7 +3934,7 @@ static void nvme_scan_work(struct work_struct *work)
int ret;
/* No tagset on a live ctrl means IO queues could not be created */
- if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE || !ctrl->tagset)
return;
/*
@@ -3973,7 +4004,7 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
* removing the namespaces' disks; fail all the queues now to avoid
* potentially having to clean up the failed sync later.
*/
- if (ctrl->state == NVME_CTRL_DEAD)
+ if (nvme_ctrl_state(ctrl) == NVME_CTRL_DEAD)
nvme_mark_namespaces_dead(ctrl);
/* this is a no-op when called from the controller reset handler */
@@ -4055,7 +4086,7 @@ static void nvme_async_event_work(struct work_struct *work)
* flushing ctrl async_event_work after changing the controller state
* from LIVE and before freeing the admin queue.
*/
- if (ctrl->state == NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE)
ctrl->ops->submit_async_event(ctrl);
}
@@ -4450,7 +4481,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
{
int ret;
- ctrl->state = NVME_CTRL_NEW;
+ WRITE_ONCE(ctrl->state, NVME_CTRL_NEW);
clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
spin_lock_init(&ctrl->lock);
mutex_init(&ctrl->scan_lock);
@@ -4471,6 +4502,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
+ ctrl->ka_last_check_time = jiffies;
BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
PAGE_SIZE);
@@ -4559,6 +4591,7 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl)
list_for_each_entry(ns, &ctrl->namespaces, list)
blk_mq_unfreeze_queue(ns->queue);
up_read(&ctrl->namespaces_rwsem);
+ clear_bit(NVME_CTRL_FROZEN, &ctrl->flags);
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);
@@ -4592,6 +4625,7 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
+ set_bit(NVME_CTRL_FROZEN, &ctrl->flags);
down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list)
blk_freeze_queue_start(ns->queue);
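Editor's note: the bulk of the nvme churn above routes every read of ctrl->state through nvme_ctrl_state(), i.e. a single READ_ONCE() snapshot that callers compare repeatedly, paired with WRITE_ONCE() on the writer side. A C11-atomics model of that accessor pattern (relaxed atomics stand in for READ_ONCE/WRITE_ONCE; types are illustrative):

#include <stdatomic.h>
#include <stdio.h>

enum ctrl_state { CTRL_NEW, CTRL_LIVE, CTRL_RESETTING };

struct ctrl { _Atomic enum ctrl_state state; };

static enum ctrl_state ctrl_state(struct ctrl *c)
{
        /* one marked read; callers then compare the snapshot, not c->state twice */
        return atomic_load_explicit(&c->state, memory_order_relaxed);
}

int main(void)
{
        struct ctrl c;

        atomic_store_explicit(&c.state, CTRL_LIVE, memory_order_relaxed);

        enum ctrl_state snap = ctrl_state(&c);

        if (snap == CTRL_LIVE)
                printf("queueing scan work against a stable snapshot\n");
        return 0;
}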
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 9f9a3b35dc64..fb22976a36a8 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -557,7 +557,7 @@ nvme_fc_rport_get(struct nvme_fc_rport *rport)
static void
nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
- switch (ctrl->ctrl.state) {
+ switch (nvme_ctrl_state(&ctrl->ctrl)) {
case NVME_CTRL_NEW:
case NVME_CTRL_CONNECTING:
/*
@@ -793,7 +793,7 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
"NVME-FC{%d}: controller connectivity lost. Awaiting "
"Reconnect", ctrl->cnum);
- switch (ctrl->ctrl.state) {
+ switch (nvme_ctrl_state(&ctrl->ctrl)) {
case NVME_CTRL_NEW:
case NVME_CTRL_LIVE:
/*
@@ -3319,7 +3319,7 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
bool recon = true;
- if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
+ if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_CONNECTING)
return;
if (portptr->port_state == FC_OBJSTATE_ONLINE) {
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 529b9954d2b8..4939ed35638f 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -18,15 +18,12 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
{
u32 effects;
- if (capable(CAP_SYS_ADMIN))
- return true;
-
/*
* Do not allow unprivileged passthrough on partitions, as that allows an
* escape from the containment of the partition.
*/
if (flags & NVME_IOCTL_PARTITION)
- return false;
+ goto admin;
/*
* Do not allow unprivileged processes to send vendor specific or fabrics
@@ -34,7 +31,7 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
*/
if (c->common.opcode >= nvme_cmd_vendor_start ||
c->common.opcode == nvme_fabrics_command)
- return false;
+ goto admin;
/*
* Do not allow unprivileged passthrough of admin commands except
@@ -53,7 +50,7 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
return true;
}
}
- return false;
+ goto admin;
}
/*
@@ -63,7 +60,7 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
*/
effects = nvme_command_effects(ns->ctrl, ns, c->common.opcode);
if (!(effects & NVME_CMD_EFFECTS_CSUPP))
- return false;
+ goto admin;
/*
* Don't allow passthrough for command that have intrusive (or unknown)
@@ -72,16 +69,20 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
NVME_CMD_EFFECTS_UUID_SEL |
NVME_CMD_EFFECTS_SCOPE_MASK))
- return false;
+ goto admin;
/*
* Only allow I/O commands that transfer data to the controller or that
* change the logical block contents if the file descriptor is open for
* writing.
*/
- if (nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC))
- return open_for_write;
+ if ((nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC)) &&
+ !open_for_write)
+ goto admin;
+
return true;
+admin:
+ return capable(CAP_SYS_ADMIN);
}
/*
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 39a90b7cb125..e7411dac00f7 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -156,6 +156,11 @@ enum nvme_quirks {
* No temperature thresholds for channels other than 0 (Composite).
*/
NVME_QUIRK_NO_SECONDARY_TEMP_THRESH = (1 << 19),
+
+ /*
+ * Disables simple suspend/resume path.
+ */
+ NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND = (1 << 20),
};
/*
@@ -251,6 +256,7 @@ enum nvme_ctrl_flags {
NVME_CTRL_STOPPED = 3,
NVME_CTRL_SKIP_ID_CNS_CS = 4,
NVME_CTRL_DIRTY_CAPABILITY = 5,
+ NVME_CTRL_FROZEN = 6,
};
struct nvme_ctrl {
@@ -387,6 +393,11 @@ struct nvme_ctrl {
enum nvme_dctype dctype;
};
+static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
+{
+ return READ_ONCE(ctrl->state);
+}
+
enum nvme_iopolicy {
NVME_IOPOLICY_NUMA,
NVME_IOPOLICY_RR,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 507bc149046d..61af7ff1a9d6 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1233,7 +1233,7 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
/* If there is a reset/reinit ongoing, we shouldn't reset again. */
- switch (dev->ctrl.state) {
+ switch (nvme_ctrl_state(&dev->ctrl)) {
case NVME_CTRL_RESETTING:
case NVME_CTRL_CONNECTING:
return false;
@@ -1321,7 +1321,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
* cancellation error. All outstanding requests are completed on
* shutdown, so we return BLK_EH_DONE.
*/
- switch (dev->ctrl.state) {
+ switch (nvme_ctrl_state(&dev->ctrl)) {
case NVME_CTRL_CONNECTING:
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
fallthrough;
@@ -1593,7 +1593,7 @@ static int nvme_setup_io_queues_trylock(struct nvme_dev *dev)
/*
* Controller is in wrong state, fail early.
*/
- if (dev->ctrl.state != NVME_CTRL_CONNECTING) {
+ if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_CONNECTING) {
mutex_unlock(&dev->shutdown_lock);
return -ENODEV;
}
@@ -2573,13 +2573,13 @@ static bool nvme_pci_ctrl_is_dead(struct nvme_dev *dev)
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
{
+ enum nvme_ctrl_state state = nvme_ctrl_state(&dev->ctrl);
struct pci_dev *pdev = to_pci_dev(dev->dev);
bool dead;
mutex_lock(&dev->shutdown_lock);
dead = nvme_pci_ctrl_is_dead(dev);
- if (dev->ctrl.state == NVME_CTRL_LIVE ||
- dev->ctrl.state == NVME_CTRL_RESETTING) {
+ if (state == NVME_CTRL_LIVE || state == NVME_CTRL_RESETTING) {
if (pci_is_enabled(pdev))
nvme_start_freeze(&dev->ctrl);
/*
@@ -2690,7 +2690,7 @@ static void nvme_reset_work(struct work_struct *work)
bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
int result;
- if (dev->ctrl.state != NVME_CTRL_RESETTING) {
+ if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_RESETTING) {
dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n",
dev->ctrl.state);
result = -ENODEV;
@@ -2902,6 +2902,18 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) &&
dmi_match(DMI_BOARD_NAME, "LNVNB161216"))
return NVME_QUIRK_SIMPLE_SUSPEND;
+ } else if (pdev->vendor == 0x2646 && (pdev->device == 0x2263 ||
+ pdev->device == 0x500f)) {
+ /*
+ * Exclude some Kingston NV1 and A2000 devices from
+ * NVME_QUIRK_SIMPLE_SUSPEND. Do a full suspend to save a
+ * lot of energy with s2idle sleep on some TUXEDO platforms.
+ */
+ if (dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") ||
+ dmi_match(DMI_BOARD_NAME, "NS5x_7xAU") ||
+ dmi_match(DMI_BOARD_NAME, "NS5x_7xPU") ||
+ dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1"))
+ return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
}
return 0;
@@ -2932,7 +2944,9 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
dev->dev = get_device(&pdev->dev);
quirks |= check_vendor_combination_bug(pdev);
- if (!noacpi && acpi_storage_d3(&pdev->dev)) {
+ if (!noacpi &&
+ !(quirks & NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND) &&
+ acpi_storage_d3(&pdev->dev)) {
/*
* Some systems use a BIOS workaround to ask for D3 on
* platforms that support kernel managed suspend.
@@ -3192,7 +3206,7 @@ static int nvme_suspend(struct device *dev)
nvme_wait_freeze(ctrl);
nvme_sync_queues(ctrl);
- if (ctrl->state != NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
goto unfreeze;
/*
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 6d178d555920..81e2621169e5 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -984,10 +984,11 @@ free_ctrl:
static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
{
+ enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
/* If we are resetting/deleting then do nothing */
- if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
- WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
- ctrl->ctrl.state == NVME_CTRL_LIVE);
+ if (state != NVME_CTRL_CONNECTING) {
+ WARN_ON_ONCE(state == NVME_CTRL_NEW || state == NVME_CTRL_LIVE);
return;
}
@@ -1059,8 +1060,10 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
* unless we're in the middle of creating a new controller,
* to avoid races with the teardown flow.
*/
- WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
- ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
+ enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
+ WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
+ state != NVME_CTRL_DELETING_NOIO);
WARN_ON_ONCE(new);
ret = -EINVAL;
goto destroy_io;
@@ -1129,8 +1132,10 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
/* state change failure is ok if we started ctrl delete */
- WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
- ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
+ enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
+ WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
+ state != NVME_CTRL_DELETING_NOIO);
return;
}
@@ -1162,7 +1167,7 @@ static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
struct nvme_rdma_queue *queue = wc->qp->qp_context;
struct nvme_rdma_ctrl *ctrl = queue->ctrl;
- if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(&ctrl->ctrl) == NVME_CTRL_LIVE)
dev_info(ctrl->ctrl.device,
"%s for CQE 0x%p failed with status %s (%d)\n",
op, wc->wr_cqe,
@@ -1945,7 +1950,7 @@ static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq)
dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
rq->tag, nvme_rdma_queue_idx(queue));
- if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
+ if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_LIVE) {
/*
* If we are resetting, connecting or deleting we should
* complete immediately because we may block controller
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index d79811cfa0ce..08805f027810 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2152,10 +2152,11 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
{
+ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
/* If we are resetting/deleting then do nothing */
- if (ctrl->state != NVME_CTRL_CONNECTING) {
- WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
- ctrl->state == NVME_CTRL_LIVE);
+ if (state != NVME_CTRL_CONNECTING) {
+ WARN_ON_ONCE(state == NVME_CTRL_NEW || state == NVME_CTRL_LIVE);
return;
}
@@ -2215,8 +2216,10 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
* unless we're in the middle of creating a new controller,
* to avoid races with the teardown flow.
*/
- WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
- ctrl->state != NVME_CTRL_DELETING_NOIO);
+ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+ WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
+ state != NVME_CTRL_DELETING_NOIO);
WARN_ON_ONCE(new);
ret = -EINVAL;
goto destroy_io;
@@ -2280,8 +2283,10 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
/* state change failure is ok if we started ctrl delete */
- WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
- ctrl->state != NVME_CTRL_DELETING_NOIO);
+ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+ WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
+ state != NVME_CTRL_DELETING_NOIO);
return;
}
@@ -2311,8 +2316,10 @@ static void nvme_reset_ctrl_work(struct work_struct *work)
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
/* state change failure is ok if we started ctrl delete */
- WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
- ctrl->state != NVME_CTRL_DELETING_NOIO);
+ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+ WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
+ state != NVME_CTRL_DELETING_NOIO);
return;
}
@@ -2430,7 +2437,7 @@ static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
nvme_tcp_queue_id(req->queue), nvme_cid(rq), pdu->hdr.type,
opc, nvme_opcode_str(qid, opc, fctype));
- if (ctrl->state != NVME_CTRL_LIVE) {
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) {
/*
* If we are resetting, connecting or deleting we should
* complete immediately because we may block controller
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index e1ebc73f3e5e..872dd1a0acd8 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -99,10 +99,11 @@ config NVME_TARGET_TCP_TLS
If unsure, say N.
config NVME_TARGET_AUTH
- bool "NVMe over Fabrics In-band Authentication support"
+ bool "NVMe over Fabrics In-band Authentication in target side"
depends on NVME_TARGET
select NVME_AUTH
help
- This enables support for NVMe over Fabrics In-band Authentication
+ This enables support for NVMe over Fabrics In-band Authentication on
+ the target side.
If unsure, say N.
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index e307a044b1a1..d937fe05129e 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -18,6 +18,7 @@
#include <linux/nvme-keyring.h>
#include <crypto/hash.h>
#include <crypto/kpp.h>
+#include <linux/nospec.h>
#include "nvmet.h"
@@ -621,6 +622,7 @@ static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
down_write(&nvmet_ana_sem);
oldgrpid = ns->anagrpid;
+ newgrpid = array_index_nospec(newgrpid, NVMET_MAX_ANAGRPS);
nvmet_ana_group_enabled[newgrpid]++;
ns->anagrpid = newgrpid;
nvmet_ana_group_enabled[oldgrpid]--;
@@ -1812,6 +1814,7 @@ static struct config_group *nvmet_ana_groups_make_group(
grp->grpid = grpid;
down_write(&nvmet_ana_sem);
+ grpid = array_index_nospec(grpid, NVMET_MAX_ANAGRPS);
nvmet_ana_group_enabled[grpid]++;
up_write(&nvmet_ana_sem);
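Both insertions above apply array_index_nospec() to a grpid that originates from userspace before it indexes nvmet_ana_group_enabled[]. The helper clamps the index even under branch misspeculation, closing the Spectre-v1 gadget that a plain bounds check leaves open. A hedged sketch with illustrative names:

#include <linux/nospec.h>

#define MAX_GROUPS 128			/* illustrative bound */
static unsigned int group_enabled[MAX_GROUPS];

static int group_enable(u32 grpid)
{
	if (grpid >= MAX_GROUPS)
		return -EINVAL;

	/* Architecturally a no-op after the check above, but it forces
	 * the index into 0..MAX_GROUPS-1 even while the CPU speculates
	 * past the branch. */
	grpid = array_index_nospec(grpid, MAX_GROUPS);

	group_enabled[grpid]++;
	return 0;
}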
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index bf42b7e826db..608b352a7d91 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -796,6 +796,12 @@ static struct nvmem_layout *nvmem_layout_get(struct nvmem_device *nvmem)
if (!layout_np)
return NULL;
+ /* Fixed layouts don't have a matching driver */
+ if (of_device_is_compatible(layout_np, "fixed-layout")) {
+ of_node_put(layout_np);
+ return NULL;
+ }
+
/*
* In case the nvmem device was built-in while the layout was built as a
* module, we shall manually request the layout driver loading otherwise
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index f63250c650ca..3bf27052832f 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -98,8 +98,9 @@ int of_reconfig_notify(unsigned long action, struct of_reconfig_data *p)
*
* Returns the new state of a device based on the notifier used.
*
- * Return: 0 on device going from enabled to disabled, 1 on device
- * going from disabled to enabled and -1 on no change.
+ * Return: OF_RECONFIG_CHANGE_REMOVE on device going from enabled to
+ * disabled, OF_RECONFIG_CHANGE_ADD on device going from disabled to
+ * enabled and OF_RECONFIG_NO_CHANGE on no change.
*/
int of_reconfig_get_state_change(unsigned long action, struct of_reconfig_data *pr)
{
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 1f236aaf7867..f33b5d1ddfc1 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2658,6 +2658,8 @@ enum parport_pc_pci_cards {
asix_ax99100,
quatech_sppxp100,
wch_ch382l,
+ brainboxes_uc146,
+ brainboxes_px203,
};
@@ -2737,6 +2739,8 @@ static struct parport_pc_pci {
/* asix_ax99100 */ { 1, { { 0, 1 }, } },
/* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
/* wch_ch382l */ { 1, { { 2, -1 }, } },
+ /* brainboxes_uc146 */ { 1, { { 3, -1 }, } },
+ /* brainboxes_px203 */ { 1, { { 0, -1 }, } },
};
static const struct pci_device_id parport_pc_pci_tbl[] = {
@@ -2833,6 +2837,23 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
/* WCH CH382L PCI-E single parallel port card */
{ 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382l },
+ /* Brainboxes IX-500/550 */
+ { PCI_VENDOR_ID_INTASHIELD, 0x402a,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+ /* Brainboxes UC-146/UC-157 */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0be1,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc146 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0be2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc146 },
+ /* Brainboxes PX-146/PX-257 */
+ { PCI_VENDOR_ID_INTASHIELD, 0x401c,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+ /* Brainboxes PX-203 */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4007,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_px203 },
+ /* Brainboxes PX-475 */
+ { PCI_VENDOR_ID_INTASHIELD, 0x401f,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
{ 0, } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, parport_pc_pci_tbl);
diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig
index d891058b7c39..846f8c99547f 100644
--- a/drivers/phy/qualcomm/Kconfig
+++ b/drivers/phy/qualcomm/Kconfig
@@ -63,7 +63,7 @@ config PHY_QCOM_QMP_COMBO
depends on DRM || DRM=n
select GENERIC_PHY
select MFD_SYSCON
- select DRM_PANEL_BRIDGE if DRM
+ select DRM_AUX_BRIDGE if DRM_BRIDGE
help
Enable this to support the QMP Combo PHY transceiver that is used
with USB3 and DisplayPort controllers on Qualcomm chips.
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 9c87845c78ec..f6c727249104 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -21,7 +21,7 @@
#include <linux/usb/typec.h>
#include <linux/usb/typec_mux.h>
-#include <drm/drm_bridge.h>
+#include <drm/bridge/aux-bridge.h>
#include <dt-bindings/phy/phy-qcom-qmp.h>
@@ -1419,8 +1419,6 @@ struct qmp_combo {
struct clk_hw dp_link_hw;
struct clk_hw dp_pixel_hw;
- struct drm_bridge bridge;
-
struct typec_switch_dev *sw;
enum typec_orientation orientation;
};
@@ -3191,44 +3189,6 @@ static int qmp_combo_typec_switch_register(struct qmp_combo *qmp)
}
#endif
-#if IS_ENABLED(CONFIG_DRM)
-static int qmp_combo_bridge_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
-{
- struct qmp_combo *qmp = container_of(bridge, struct qmp_combo, bridge);
- struct drm_bridge *next_bridge;
-
- if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
- return -EINVAL;
-
- next_bridge = devm_drm_of_get_bridge(qmp->dev, qmp->dev->of_node, 0, 0);
- if (IS_ERR(next_bridge)) {
- dev_err(qmp->dev, "failed to acquire drm_bridge: %pe\n", next_bridge);
- return PTR_ERR(next_bridge);
- }
-
- return drm_bridge_attach(bridge->encoder, next_bridge, bridge,
- DRM_BRIDGE_ATTACH_NO_CONNECTOR);
-}
-
-static const struct drm_bridge_funcs qmp_combo_bridge_funcs = {
- .attach = qmp_combo_bridge_attach,
-};
-
-static int qmp_combo_dp_register_bridge(struct qmp_combo *qmp)
-{
- qmp->bridge.funcs = &qmp_combo_bridge_funcs;
- qmp->bridge.of_node = qmp->dev->of_node;
-
- return devm_drm_bridge_add(qmp->dev, &qmp->bridge);
-}
-#else
-static int qmp_combo_dp_register_bridge(struct qmp_combo *qmp)
-{
- return 0;
-}
-#endif
-
static int qmp_combo_parse_dt_lecacy_dp(struct qmp_combo *qmp, struct device_node *np)
{
struct device *dev = qmp->dev;
@@ -3440,7 +3400,7 @@ static int qmp_combo_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = qmp_combo_dp_register_bridge(qmp);
+ ret = drm_aux_bridge_register(dev);
if (ret)
return ret;
diff --git a/drivers/pinctrl/cirrus/Kconfig b/drivers/pinctrl/cirrus/Kconfig
index d6318cb57aff..e7e827a8877a 100644
--- a/drivers/pinctrl/cirrus/Kconfig
+++ b/drivers/pinctrl/cirrus/Kconfig
@@ -12,7 +12,8 @@ config PINCTRL_CS42L43
config PINCTRL_LOCHNAGAR
tristate "Cirrus Logic Lochnagar pinctrl driver"
- depends on MFD_LOCHNAGAR
+ # Avoid clash caused by MIPS defining RST, which is used in the driver
+ depends on MFD_LOCHNAGAR && !MIPS
select GPIOLIB
select PINMUX
select PINCONF
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 1fa89be29b8f..f2977eb65522 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1262,17 +1262,17 @@ static void pinctrl_link_add(struct pinctrl_dev *pctldev,
static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
{
struct pinctrl_setting *setting, *setting2;
- struct pinctrl_state *old_state = p->state;
+ struct pinctrl_state *old_state = READ_ONCE(p->state);
int ret;
- if (p->state) {
+ if (old_state) {
/*
* For each pinmux setting in the old state, forget SW's record
* of mux owner for that pingroup. Any pingroups which are
* still owned by the new state will be re-acquired by the call
* to pinmux_enable_setting() in the loop below.
*/
- list_for_each_entry(setting, &p->state->settings, node) {
+ list_for_each_entry(setting, &old_state->settings, node) {
if (setting->type != PIN_MAP_TYPE_MUX_GROUP)
continue;
pinmux_disable_setting(setting);
diff --git a/drivers/pinctrl/nxp/pinctrl-s32cc.c b/drivers/pinctrl/nxp/pinctrl-s32cc.c
index 7daff9f186cd..f0cad2c501f7 100644
--- a/drivers/pinctrl/nxp/pinctrl-s32cc.c
+++ b/drivers/pinctrl/nxp/pinctrl-s32cc.c
@@ -843,8 +843,8 @@ static int s32_pinctrl_probe_dt(struct platform_device *pdev,
if (!np)
return -ENODEV;
- if (mem_regions == 0) {
- dev_err(&pdev->dev, "mem_regions is 0\n");
+ if (mem_regions == 0 || mem_regions >= 10000) {
+ dev_err(&pdev->dev, "mem_regions is invalid: %u\n", mem_regions);
return -EINVAL;
}
diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c
index 04285c930e94..4ccfa99ed93a 100644
--- a/drivers/pinctrl/pinctrl-cy8c95x0.c
+++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
@@ -143,6 +143,7 @@ static const struct dmi_system_id cy8c95x0_dmi_acpi_irq_info[] = {
* @pinctrl_desc: pin controller description
* @name: Chip controller name
* @tpin: Total number of pins
+ * @gpio_reset: GPIO line handler that can reset the IC
*/
struct cy8c95x0_pinctrl {
struct regmap *regmap;
diff --git a/drivers/pinctrl/realtek/pinctrl-rtd.c b/drivers/pinctrl/realtek/pinctrl-rtd.c
index 9c7a1af4ba69..208896593b61 100644
--- a/drivers/pinctrl/realtek/pinctrl-rtd.c
+++ b/drivers/pinctrl/realtek/pinctrl-rtd.c
@@ -146,7 +146,7 @@ static int rtd_pinctrl_get_function_groups(struct pinctrl_dev *pcdev,
static const struct rtd_pin_desc *rtd_pinctrl_find_mux(struct rtd_pinctrl *data, unsigned int pin)
{
- if (!data->info->muxes[pin].name)
+ if (data->info->muxes[pin].name)
return &data->info->muxes[pin];
return NULL;
@@ -249,7 +249,7 @@ static const struct pinctrl_pin_desc
static const struct rtd_pin_config_desc
*rtd_pinctrl_find_config(struct rtd_pinctrl *data, unsigned int pin)
{
- if (!data->info->configs[pin].name)
+ if (data->info->configs[pin].name)
return &data->info->configs[pin];
return NULL;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 64e8201c7eac..603f900e88c1 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -1273,9 +1273,11 @@ static struct stm32_desc_pin *stm32_pctrl_get_desc_pin_from_gpio(struct stm32_pi
int i;
/* With few exceptions (e.g. bank 'Z'), pin number matches with pin index in array */
- pin_desc = pctl->pins + stm32_pin_nb;
- if (pin_desc->pin.number == stm32_pin_nb)
- return pin_desc;
+ if (stm32_pin_nb < pctl->npins) {
+ pin_desc = pctl->pins + stm32_pin_nb;
+ if (pin_desc->pin.number == stm32_pin_nb)
+ return pin_desc;
+ }
/* Otherwise, loop all array to find the pin with the right number */
for (i = 0; i < pctl->npins; i++) {
@@ -1368,6 +1370,11 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
}
names = devm_kcalloc(dev, npins, sizeof(char *), GFP_KERNEL);
+ if (!names) {
+ err = -ENOMEM;
+ goto err_clk;
+ }
+
for (i = 0; i < npins; i++) {
stm32_pin = stm32_pctrl_get_desc_pin_from_gpio(pctl, bank, i);
if (stm32_pin && stm32_pin->pin.name)
diff --git a/drivers/platform/mellanox/mlxbf-bootctl.c b/drivers/platform/mellanox/mlxbf-bootctl.c
index 1ac7dab22c63..c1aef3a8fb2d 100644
--- a/drivers/platform/mellanox/mlxbf-bootctl.c
+++ b/drivers/platform/mellanox/mlxbf-bootctl.c
@@ -20,6 +20,7 @@
#define MLXBF_BOOTCTL_SB_SECURE_MASK 0x03
#define MLXBF_BOOTCTL_SB_TEST_MASK 0x0c
+#define MLXBF_BOOTCTL_SB_DEV_MASK BIT(4)
#define MLXBF_SB_KEY_NUM 4
@@ -40,11 +41,18 @@ static struct mlxbf_bootctl_name boot_names[] = {
{ MLXBF_BOOTCTL_NONE, "none" },
};
+enum {
+ MLXBF_BOOTCTL_SB_LIFECYCLE_PRODUCTION = 0,
+ MLXBF_BOOTCTL_SB_LIFECYCLE_GA_SECURE = 1,
+ MLXBF_BOOTCTL_SB_LIFECYCLE_GA_NON_SECURE = 2,
+ MLXBF_BOOTCTL_SB_LIFECYCLE_RMA = 3
+};
+
static const char * const mlxbf_bootctl_lifecycle_states[] = {
- [0] = "Production",
- [1] = "GA Secured",
- [2] = "GA Non-Secured",
- [3] = "RMA",
+ [MLXBF_BOOTCTL_SB_LIFECYCLE_PRODUCTION] = "Production",
+ [MLXBF_BOOTCTL_SB_LIFECYCLE_GA_SECURE] = "GA Secured",
+ [MLXBF_BOOTCTL_SB_LIFECYCLE_GA_NON_SECURE] = "GA Non-Secured",
+ [MLXBF_BOOTCTL_SB_LIFECYCLE_RMA] = "RMA",
};
/* Log header format. */
@@ -247,25 +255,30 @@ static ssize_t second_reset_action_store(struct device *dev,
static ssize_t lifecycle_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ int status_bits;
+ int use_dev_key;
+ int test_state;
int lc_state;
- lc_state = mlxbf_bootctl_smc(MLXBF_BOOTCTL_GET_TBB_FUSE_STATUS,
- MLXBF_BOOTCTL_FUSE_STATUS_LIFECYCLE);
- if (lc_state < 0)
- return lc_state;
+ status_bits = mlxbf_bootctl_smc(MLXBF_BOOTCTL_GET_TBB_FUSE_STATUS,
+ MLXBF_BOOTCTL_FUSE_STATUS_LIFECYCLE);
+ if (status_bits < 0)
+ return status_bits;
- lc_state &=
- MLXBF_BOOTCTL_SB_TEST_MASK | MLXBF_BOOTCTL_SB_SECURE_MASK;
+ use_dev_key = status_bits & MLXBF_BOOTCTL_SB_DEV_MASK;
+ test_state = status_bits & MLXBF_BOOTCTL_SB_TEST_MASK;
+ lc_state = status_bits & MLXBF_BOOTCTL_SB_SECURE_MASK;
/*
* If the test bits are set, we specify that the current state may be
* due to using the test bits.
*/
- if (lc_state & MLXBF_BOOTCTL_SB_TEST_MASK) {
- lc_state &= MLXBF_BOOTCTL_SB_SECURE_MASK;
-
+ if (test_state) {
return sprintf(buf, "%s(test)\n",
mlxbf_bootctl_lifecycle_states[lc_state]);
+ } else if (use_dev_key &&
+ (lc_state == MLXBF_BOOTCTL_SB_LIFECYCLE_GA_SECURE)) {
+ return sprintf(buf, "Secured (development)\n");
}
return sprintf(buf, "%s\n", mlxbf_bootctl_lifecycle_states[lc_state]);
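The rewrite above stops destructively masking lc_state and instead derives three independent fields from one fuse-status read. Assuming the mask layout shown (SB_SECURE_MASK 0x03, SB_TEST_MASK 0x0c, SB_DEV_MASK bit 4), the decode reduces to the following illustrative helper, which is not in the patch:

static const char *lifecycle_name(int status_bits)
{
	int test_state = status_bits & MLXBF_BOOTCTL_SB_TEST_MASK;	/* bits 2-3 */
	int lc_state   = status_bits & MLXBF_BOOTCTL_SB_SECURE_MASK;	/* bits 0-1 */

	/* SECURE_MASK is 0x03, so lc_state always indexes the
	 * four-entry lifecycle table in range. */
	return test_state ? "(test state)"
			  : mlxbf_bootctl_lifecycle_states[lc_state];
}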
diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
index 0b427fc24a96..1dd84c7a79de 100644
--- a/drivers/platform/mellanox/mlxbf-pmc.c
+++ b/drivers/platform/mellanox/mlxbf-pmc.c
@@ -1771,6 +1771,8 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
attr->dev_attr.show = mlxbf_pmc_event_list_show;
attr->nr = blk_num;
attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, "event_list");
+ if (!attr->dev_attr.attr.name)
+ return -ENOMEM;
pmc->block[blk_num].block_attr[i] = &attr->dev_attr.attr;
attr = NULL;
@@ -1784,6 +1786,8 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
attr->nr = blk_num;
attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
"enable");
+ if (!attr->dev_attr.attr.name)
+ return -ENOMEM;
pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
attr = NULL;
}
@@ -1810,6 +1814,8 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
attr->nr = blk_num;
attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
"counter%d", j);
+ if (!attr->dev_attr.attr.name)
+ return -ENOMEM;
pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
attr = NULL;
@@ -1821,6 +1827,8 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
attr->nr = blk_num;
attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
"event%d", j);
+ if (!attr->dev_attr.attr.name)
+ return -ENOMEM;
pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
attr = NULL;
}
@@ -1853,6 +1861,8 @@ static int mlxbf_pmc_init_perftype_reg(struct device *dev, int blk_num)
attr->nr = blk_num;
attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
events[j].evt_name);
+ if (!attr->dev_attr.attr.name)
+ return -ENOMEM;
pmc->block[blk_num].block_attr[i] = &attr->dev_attr.attr;
attr = NULL;
i++;
@@ -1882,6 +1892,8 @@ static int mlxbf_pmc_create_groups(struct device *dev, int blk_num)
pmc->block[blk_num].block_attr_grp.attrs = pmc->block[blk_num].block_attr;
pmc->block[blk_num].block_attr_grp.name = devm_kasprintf(
dev, GFP_KERNEL, pmc->block_name[blk_num]);
+ if (!pmc->block[blk_num].block_attr_grp.name)
+ return -ENOMEM;
pmc->groups[pmc->group_num] = &pmc->block[blk_num].block_attr_grp;
pmc->group_num++;
@@ -2063,6 +2075,8 @@ static int mlxbf_pmc_probe(struct platform_device *pdev)
pmc->hwmon_dev = devm_hwmon_device_register_with_groups(
dev, "bfperf", pmc, pmc->groups);
+ if (IS_ERR(pmc->hwmon_dev))
+ return PTR_ERR(pmc->hwmon_dev);
platform_set_drvdata(pdev, pmc);
return 0;
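Every hunk in this file adds the same check: devm_kasprintf() allocates the attribute name and returns NULL on failure, so the result must be tested before the attribute is registered. The pattern in isolation (name_counter_attr() is a hypothetical wrapper):

static int name_counter_attr(struct device *dev,
			     struct device_attribute *da, int idx)
{
	da->attr.name = devm_kasprintf(dev, GFP_KERNEL, "counter%d", idx);
	if (!da->attr.name)
		return -ENOMEM;	/* allocation failed; never register a NULL name */

	return 0;
}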
diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
index 1a6373dea109..6152be38398c 100644
--- a/drivers/platform/surface/aggregator/core.c
+++ b/drivers/platform/surface/aggregator/core.c
@@ -231,9 +231,12 @@ static int ssam_receive_buf(struct serdev_device *dev, const unsigned char *buf,
size_t n)
{
struct ssam_controller *ctrl;
+ int ret;
ctrl = serdev_device_get_drvdata(dev);
- return ssam_controller_receive_buf(ctrl, buf, n);
+ ret = ssam_controller_receive_buf(ctrl, buf, n);
+
+ return ret < 0 ? 0 : ret;
}
static void ssam_write_wakeup(struct serdev_device *dev)
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 7e69fdaccdd5..c94f31a5c6a3 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -263,6 +263,7 @@ config ASUS_WMI
depends on RFKILL || RFKILL = n
depends on HOTPLUG_PCI
depends on ACPI_VIDEO || ACPI_VIDEO = n
+ depends on SERIO_I8042 || SERIO_I8042 = n
select INPUT_SPARSEKMAP
select LEDS_CLASS
select NEW_LEDS
@@ -279,7 +280,6 @@ config ASUS_WMI
config ASUS_NB_WMI
tristate "Asus Notebook WMI Driver"
depends on ASUS_WMI
- depends on SERIO_I8042 || SERIO_I8042 = n
help
This is a driver for newer Asus notebooks. It adds extra features
like wireless radio and bluetooth control, leds, hotkeys, backlight...
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 9aa1226e74e6..fceffe2082ec 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -48,25 +48,43 @@ module_param(tablet_mode_sw, uint, 0444);
MODULE_PARM_DESC(tablet_mode_sw, "Tablet mode detect: -1:auto 0:disable 1:kbd-dock 2:lid-flip 3:lid-flip-rog");
static struct quirk_entry *quirks;
+static bool atkbd_reports_vol_keys;
-static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str,
- struct serio *port)
+static bool asus_i8042_filter(unsigned char data, unsigned char str, struct serio *port)
{
- static bool extended;
- bool ret = false;
+ static bool extended_e0;
+ static bool extended_e1;
if (str & I8042_STR_AUXDATA)
return false;
- if (unlikely(data == 0xe1)) {
- extended = true;
- ret = true;
- } else if (unlikely(extended)) {
- extended = false;
- ret = true;
+ if (quirks->filter_i8042_e1_extended_codes) {
+ if (data == 0xe1) {
+ extended_e1 = true;
+ return true;
+ }
+
+ if (extended_e1) {
+ extended_e1 = false;
+ return true;
+ }
}
- return ret;
+ if (data == 0xe0) {
+ extended_e0 = true;
+ } else if (extended_e0) {
+ extended_e0 = false;
+
+ switch (data & 0x7f) {
+ case 0x20: /* e0 20 / e0 a0, Volume Mute press / release */
+ case 0x2e: /* e0 2e / e0 ae, Volume Down press / release */
+ case 0x30: /* e0 30 / e0 b0, Volume Up press / release */
+ atkbd_reports_vol_keys = true;
+ break;
+ }
+ }
+
+ return false;
}
static struct quirk_entry quirk_asus_unknown = {
@@ -75,7 +93,7 @@ static struct quirk_entry quirk_asus_unknown = {
};
static struct quirk_entry quirk_asus_q500a = {
- .i8042_filter = asus_q500a_i8042_filter,
+ .filter_i8042_e1_extended_codes = true,
.wmi_backlight_set_devstate = true,
};
@@ -503,8 +521,6 @@ static const struct dmi_system_id asus_quirks[] = {
static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
{
- int ret;
-
quirks = &quirk_asus_unknown;
dmi_check_system(asus_quirks);
@@ -519,15 +535,6 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
if (tablet_mode_sw != -1)
quirks->tablet_switch_mode = tablet_mode_sw;
-
- if (quirks->i8042_filter) {
- ret = i8042_install_filter(quirks->i8042_filter);
- if (ret) {
- pr_warn("Unable to install key filter\n");
- return;
- }
- pr_info("Using i8042 filter function for receiving events\n");
- }
}
static const struct key_entry asus_nb_wmi_keymap[] = {
@@ -618,6 +625,13 @@ static void asus_nb_wmi_key_filter(struct asus_wmi_driver *asus_wmi, int *code,
*code = ASUS_WMI_KEY_IGNORE;
break;
+ case 0x30: /* Volume Up */
+ case 0x31: /* Volume Down */
+ case 0x32: /* Volume Mute */
+ if (atkbd_reports_vol_keys)
+ *code = ASUS_WMI_KEY_IGNORE;
+
+ break;
}
}
@@ -630,6 +644,7 @@ static struct asus_wmi_driver asus_nb_wmi_driver = {
.input_phys = ASUS_NB_WMI_FILE "/input0",
.detect_quirks = asus_nb_wmi_quirks,
.key_filter = asus_nb_wmi_key_filter,
+ .i8042_filter = asus_i8042_filter,
};
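The reworked filter above relies on how AT keyboards report extended keys: a 0xe0 prefix byte arrives first and the key code follows, with bit 7 of the second byte distinguishing release from press. A hedged standalone sketch of that two-byte tracking (feed_scancode() is illustrative only):

static bool saw_e0;

/* Returns the extended key code once both bytes have arrived, else -1. */
static int feed_scancode(unsigned char data)
{
	if (data == 0xe0) {		/* prefix: next byte is an extended code */
		saw_e0 = true;
		return -1;
	}

	if (!saw_e0)
		return -1;

	saw_e0 = false;
	/* Bit 7 carries press/release; masking with 0x7f leaves the key,
	 * e.g. 0x20 mute, 0x2e volume-down, 0x30 volume-up. */
	return data & 0x7f;
}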
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 6a79f16233ab..9f7e23c5c6b4 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -16,6 +16,7 @@
#include <linux/acpi.h>
#include <linux/backlight.h>
#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/fb.h>
#include <linux/hwmon.h>
@@ -132,6 +133,11 @@ module_param(fnlock_default, bool, 0444);
#define ASUS_SCREENPAD_BRIGHT_MAX 255
#define ASUS_SCREENPAD_BRIGHT_DEFAULT 60
+/* Controls the power state of the USB0 hub on the ROG Ally that the input device is on */
+#define ASUS_USB0_PWR_EC0_CSEE "\\_SB.PCI0.SBRG.EC0.CSEE"
+/* 300ms so far seems to produce a reliable result on AC and battery */
+#define ASUS_USB0_PWR_EC0_CSEE_WAIT 300
+
static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL };
static int throttle_thermal_policy_write(struct asus_wmi *);
@@ -300,6 +306,9 @@ struct asus_wmi {
bool fnlock_locked;
+ /* The ROG Ally device requires the MCU USB device be disconnected before suspend */
+ bool ally_mcu_usb_switch;
+
struct asus_wmi_debug debug;
struct asus_wmi_driver *driver;
@@ -4488,6 +4497,8 @@ static int asus_wmi_add(struct platform_device *pdev)
asus->nv_temp_tgt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_NV_THERM_TARGET);
asus->panel_overdrive_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PANEL_OD);
asus->mini_led_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MINI_LED_MODE);
+ asus->ally_mcu_usb_switch = acpi_has_method(NULL, ASUS_USB0_PWR_EC0_CSEE)
+ && dmi_match(DMI_BOARD_NAME, "RC71L");
err = fan_boost_mode_check_present(asus);
if (err)
@@ -4567,6 +4578,12 @@ static int asus_wmi_add(struct platform_device *pdev)
goto fail_wmi_handler;
}
+ if (asus->driver->i8042_filter) {
+ err = i8042_install_filter(asus->driver->i8042_filter);
+ if (err)
+ pr_warn("Unable to install key filter - %d\n", err);
+ }
+
asus_wmi_battery_init(asus);
asus_wmi_debugfs_init(asus);
@@ -4603,6 +4620,8 @@ static int asus_wmi_remove(struct platform_device *device)
struct asus_wmi *asus;
asus = platform_get_drvdata(device);
+ if (asus->driver->i8042_filter)
+ i8042_remove_filter(asus->driver->i8042_filter);
wmi_remove_notify_handler(asus->driver->event_guid);
asus_wmi_backlight_exit(asus);
asus_screenpad_exit(asus);
@@ -4654,6 +4673,43 @@ static int asus_hotk_resume(struct device *device)
asus_wmi_fnlock_update(asus);
asus_wmi_tablet_mode_get_state(asus);
+
+ return 0;
+}
+
+static int asus_hotk_resume_early(struct device *device)
+{
+ struct asus_wmi *asus = dev_get_drvdata(device);
+
+ if (asus->ally_mcu_usb_switch) {
+ if (ACPI_FAILURE(acpi_execute_simple_method(NULL, ASUS_USB0_PWR_EC0_CSEE, 0xB8)))
+ dev_err(device, "ROG Ally MCU failed to connect USB dev\n");
+ else
+ msleep(ASUS_USB0_PWR_EC0_CSEE_WAIT);
+ }
+ return 0;
+}
+
+static int asus_hotk_prepare(struct device *device)
+{
+ struct asus_wmi *asus = dev_get_drvdata(device);
+ int result, err;
+
+ if (asus->ally_mcu_usb_switch) {
+ /* When powersave is enabled it causes many issues with resume of the USB hub */
+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_MCU_POWERSAVE);
+ if (result == 1) {
+ dev_warn(device, "MCU powersave enabled, disabling to prevent resume issues");
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_MCU_POWERSAVE, 0, &result);
+ if (err || result != 1)
+ dev_err(device, "Failed to set MCU powersave mode: %d\n", err);
+ }
+ /* the msleep ensures USB0 is disabled before system suspend continues */
+ if (ACPI_FAILURE(acpi_execute_simple_method(NULL, ASUS_USB0_PWR_EC0_CSEE, 0xB7)))
+ dev_err(device, "ROG Ally MCU failed to disconnect USB dev\n");
+ else
+ msleep(ASUS_USB0_PWR_EC0_CSEE_WAIT);
+ }
return 0;
}
@@ -4701,6 +4757,8 @@ static const struct dev_pm_ops asus_pm_ops = {
.thaw = asus_hotk_thaw,
.restore = asus_hotk_restore,
.resume = asus_hotk_resume,
+ .resume_early = asus_hotk_resume_early,
+ .prepare = asus_hotk_prepare,
};
/* Registration ***************************************************************/
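The two new callbacks slot into the suspend/resume sequence at specific points: .prepare runs before any suspend callback (so the MCU USB device can be disconnected first), and .resume_early runs before .resume (so it is reconnected before normal resume work). A minimal sketch of wiring those slots, with placeholder callbacks:

static int demo_prepare(struct device *dev)		/* first step of suspend */
{
	return 0;
}

static int demo_resume_early(struct device *dev)	/* runs before .resume */
{
	return 0;
}

static const struct dev_pm_ops demo_pm_ops = {
	.prepare	= demo_prepare,
	.resume_early	= demo_resume_early,
};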
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index adb67c925724..cc30f1853847 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -39,6 +39,7 @@ struct quirk_entry {
bool wmi_backlight_set_devstate;
bool wmi_force_als_set;
bool wmi_ignore_fan;
+ bool filter_i8042_e1_extended_codes;
enum asus_wmi_tablet_switch_mode tablet_switch_mode;
int wapf;
/*
@@ -49,9 +50,6 @@ struct quirk_entry {
*/
int no_display_toggle;
u32 xusb2pr;
-
- bool (*i8042_filter)(unsigned char data, unsigned char str,
- struct serio *serio);
};
struct asus_wmi_driver {
@@ -73,6 +71,9 @@ struct asus_wmi_driver {
* Return ASUS_WMI_KEY_IGNORE in code if event should be ignored. */
void (*key_filter) (struct asus_wmi_driver *driver, int *code,
unsigned int *value, bool *autorelease);
+ /* Optional standard i8042 filter */
+ bool (*i8042_filter)(unsigned char data, unsigned char str,
+ struct serio *serio);
int (*probe) (struct platform_device *device);
void (*detect_quirks) (struct asus_wmi_driver *driver);
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 5c27b4aa9690..5dd22258cb3b 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -1340,6 +1340,11 @@ static int parse_wdg(struct device *wmi_bus_dev, struct platform_device *pdev)
if (debug_dump_wdg)
wmi_dump_wdg(&gblock[i]);
+ if (!gblock[i].instance_count) {
+ dev_info(wmi_bus_dev, FW_INFO "%pUL has zero instances\n", &gblock[i].guid);
+ continue;
+ }
+
if (guid_already_parsed_for_legacy(device, &gblock[i].guid))
continue;
diff --git a/drivers/pmdomain/arm/scmi_perf_domain.c b/drivers/pmdomain/arm/scmi_perf_domain.c
index bc3f78abb6da..709bbc448fad 100644
--- a/drivers/pmdomain/arm/scmi_perf_domain.c
+++ b/drivers/pmdomain/arm/scmi_perf_domain.c
@@ -35,7 +35,7 @@ scmi_pd_set_perf_state(struct generic_pm_domain *genpd, unsigned int state)
if (!state)
return -EINVAL;
- ret = pd->perf_ops->level_set(pd->ph, pd->domain_id, state, true);
+ ret = pd->perf_ops->level_set(pd->ph, pd->domain_id, state, false);
if (ret)
dev_warn(&genpd->dev, "Failed with %d when trying to set %d perf level",
ret, state);
diff --git a/drivers/pmdomain/qcom/rpmpd.c b/drivers/pmdomain/qcom/rpmpd.c
index 07590a3ef19c..7796d65f96e8 100644
--- a/drivers/pmdomain/qcom/rpmpd.c
+++ b/drivers/pmdomain/qcom/rpmpd.c
@@ -1044,6 +1044,7 @@ static int rpmpd_probe(struct platform_device *pdev)
rpmpds[i]->pd.power_off = rpmpd_power_off;
rpmpds[i]->pd.power_on = rpmpd_power_on;
rpmpds[i]->pd.set_performance_state = rpmpd_set_performance;
+ rpmpds[i]->pd.flags = GENPD_FLAG_ACTIVE_WAKEUP;
pm_genpd_init(&rpmpds[i]->pd, NULL, true);
data->domains[i] = &rpmpds[i]->pd;
diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c
index 2ff7717530bf..9193c3b8edeb 100644
--- a/drivers/powercap/dtpm_cpu.c
+++ b/drivers/powercap/dtpm_cpu.c
@@ -24,7 +24,6 @@
#include <linux/of.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
-#include <linux/units.h>
struct dtpm_cpu {
struct dtpm dtpm;
@@ -104,8 +103,7 @@ static u64 get_pd_power_uw(struct dtpm *dtpm)
if (pd->table[i].frequency < freq)
continue;
- return scale_pd_power_uw(pd_mask, pd->table[i].power *
- MICROWATT_PER_MILLIWATT);
+ return scale_pd_power_uw(pd_mask, pd->table[i].power);
}
return 0;
@@ -122,11 +120,9 @@ static int update_pd_power_uw(struct dtpm *dtpm)
nr_cpus = cpumask_weight(&cpus);
dtpm->power_min = em->table[0].power;
- dtpm->power_min *= MICROWATT_PER_MILLIWATT;
dtpm->power_min *= nr_cpus;
dtpm->power_max = em->table[em->nr_perf_states - 1].power;
- dtpm->power_max *= MICROWATT_PER_MILLIWATT;
dtpm->power_max *= nr_cpus;
return 0;
@@ -144,6 +140,8 @@ static void pd_release(struct dtpm *dtpm)
if (policy) {
for_each_cpu(dtpm_cpu->cpu, policy->related_cpus)
per_cpu(dtpm_per_cpu, dtpm_cpu->cpu) = NULL;
+
+ cpufreq_cpu_put(policy);
}
kfree(dtpm_cpu);
@@ -195,12 +193,16 @@ static int __dtpm_cpu_setup(int cpu, struct dtpm *parent)
return 0;
pd = em_cpu_get(cpu);
- if (!pd || em_is_artificial(pd))
- return -EINVAL;
+ if (!pd || em_is_artificial(pd)) {
+ ret = -EINVAL;
+ goto release_policy;
+ }
dtpm_cpu = kzalloc(sizeof(*dtpm_cpu), GFP_KERNEL);
- if (!dtpm_cpu)
- return -ENOMEM;
+ if (!dtpm_cpu) {
+ ret = -ENOMEM;
+ goto release_policy;
+ }
dtpm_init(&dtpm_cpu->dtpm, &dtpm_ops);
dtpm_cpu->cpu = cpu;
@@ -220,6 +222,7 @@ static int __dtpm_cpu_setup(int cpu, struct dtpm *parent)
if (ret)
goto out_dtpm_unregister;
+ cpufreq_cpu_put(policy);
return 0;
out_dtpm_unregister:
@@ -231,6 +234,8 @@ out_kfree_dtpm_cpu:
per_cpu(dtpm_per_cpu, cpu) = NULL;
kfree(dtpm_cpu);
+release_policy:
+ cpufreq_cpu_put(policy);
return ret;
}
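The dtpm_cpu changes fix a reference leak: cpufreq_cpu_get() takes a reference on the policy, so every exit path, success or failure, must reach a matching cpufreq_cpu_put(). The goto-unwind shape in isolation (something_failed() is a stand-in for the real checks):

static int setup_cpu(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	int ret = 0;

	policy = cpufreq_cpu_get(cpu);		/* takes a reference */
	if (!policy)
		return 0;

	if (something_failed()) {		/* stand-in for the real checks */
		ret = -EINVAL;
		goto release_policy;
	}

release_policy:
	cpufreq_cpu_put(policy);		/* balanced on every path */
	return ret;
}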
diff --git a/drivers/powercap/dtpm_devfreq.c b/drivers/powercap/dtpm_devfreq.c
index 91276761a31d..612c3b59dd5b 100644
--- a/drivers/powercap/dtpm_devfreq.c
+++ b/drivers/powercap/dtpm_devfreq.c
@@ -39,10 +39,8 @@ static int update_pd_power_uw(struct dtpm *dtpm)
struct em_perf_domain *pd = em_pd_get(dev);
dtpm->power_min = pd->table[0].power;
- dtpm->power_min *= MICROWATT_PER_MILLIWATT;
dtpm->power_max = pd->table[pd->nr_perf_states - 1].power;
- dtpm->power_max *= MICROWATT_PER_MILLIWATT;
return 0;
}
@@ -54,13 +52,10 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
struct device *dev = devfreq->dev.parent;
struct em_perf_domain *pd = em_pd_get(dev);
unsigned long freq;
- u64 power;
int i;
for (i = 0; i < pd->nr_perf_states; i++) {
-
- power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
- if (power > power_limit)
+ if (pd->table[i].power > power_limit)
break;
}
@@ -68,7 +63,7 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
dev_pm_qos_update_request(&dtpm_devfreq->qos_req, freq);
- power_limit = pd->table[i - 1].power * MICROWATT_PER_MILLIWATT;
+ power_limit = pd->table[i - 1].power;
return power_limit;
}
@@ -110,7 +105,7 @@ static u64 get_pd_power_uw(struct dtpm *dtpm)
if (pd->table[i].frequency < freq)
continue;
- power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
+ power = pd->table[i].power;
power *= status.busy_time;
power >>= 10;
diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c
index 9777babd5b95..ab30667f4f95 100644
--- a/drivers/pwm/pwm-bcm2835.c
+++ b/drivers/pwm/pwm-bcm2835.c
@@ -155,6 +155,8 @@ static int bcm2835_pwm_probe(struct platform_device *pdev)
pc->chip.ops = &bcm2835_pwm_ops;
pc->chip.npwm = 2;
+ platform_set_drvdata(pdev, pc);
+
ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret,
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index e48f14ad6dfd..06acb5ff609e 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -2710,6 +2710,7 @@ init_wrb_hndl_failed:
kfree(pwrb_context->pwrb_handle_base);
kfree(pwrb_context->pwrb_handle_basestd);
}
+ kfree(phwi_ctxt->be_wrbq);
return -ENOMEM;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index fa00dd503cbf..542a4bbb21bc 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3949,8 +3949,15 @@ static int sd_resume(struct device *dev, bool runtime)
static int sd_resume_system(struct device *dev)
{
- if (pm_runtime_suspended(dev))
+ if (pm_runtime_suspended(dev)) {
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
+ struct scsi_device *sdp = sdkp ? sdkp->device : NULL;
+
+ if (sdp && sdp->force_runtime_start_on_system_start)
+ pm_request_resume(dev);
+
return 0;
+ }
return sd_resume(dev, false);
}
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index b3634e10f6f5..c954001ae79e 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -86,6 +86,7 @@ config QCOM_PMIC_GLINK
depends on OF
select AUXILIARY_BUS
select QCOM_PDR_HELPERS
+ select DRM_AUX_HPD_BRIDGE
help
The Qualcomm PMIC GLINK driver provides access, over GLINK, to the
USB and battery firmware running on one of the coprocessors in
diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
index b78279e2f54c..053b7393e26a 100644
--- a/drivers/soc/qcom/pmic_glink_altmode.c
+++ b/drivers/soc/qcom/pmic_glink_altmode.c
@@ -11,7 +11,7 @@
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/soc/qcom/pdr.h>
-#include <drm/drm_bridge.h>
+#include <drm/bridge/aux-bridge.h>
#include <linux/usb/typec_altmode.h>
#include <linux/usb/typec_dp.h>
@@ -76,7 +76,7 @@ struct pmic_glink_altmode_port {
struct work_struct work;
- struct drm_bridge bridge;
+ struct device *bridge;
enum typec_orientation orientation;
u16 svid;
@@ -230,10 +230,10 @@ static void pmic_glink_altmode_worker(struct work_struct *work)
else
pmic_glink_altmode_enable_usb(altmode, alt_port);
- if (alt_port->hpd_state)
- drm_bridge_hpd_notify(&alt_port->bridge, connector_status_connected);
- else
- drm_bridge_hpd_notify(&alt_port->bridge, connector_status_disconnected);
+ drm_aux_hpd_bridge_notify(alt_port->bridge,
+ alt_port->hpd_state ?
+ connector_status_connected :
+ connector_status_disconnected);
pmic_glink_altmode_request(altmode, ALTMODE_PAN_ACK, alt_port->index);
};
@@ -365,16 +365,6 @@ static void pmic_glink_altmode_callback(const void *data, size_t len, void *priv
}
}
-static int pmic_glink_altmode_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
-{
- return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
-}
-
-static const struct drm_bridge_funcs pmic_glink_altmode_bridge_funcs = {
- .attach = pmic_glink_altmode_attach,
-};
-
static void pmic_glink_altmode_put_retimer(void *data)
{
typec_retimer_put(data);
@@ -464,15 +454,10 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
alt_port->index = port;
INIT_WORK(&alt_port->work, pmic_glink_altmode_worker);
- alt_port->bridge.funcs = &pmic_glink_altmode_bridge_funcs;
- alt_port->bridge.of_node = to_of_node(fwnode);
- alt_port->bridge.ops = DRM_BRIDGE_OP_HPD;
- alt_port->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
-
- ret = devm_drm_bridge_add(dev, &alt_port->bridge);
- if (ret) {
+ alt_port->bridge = drm_dp_hpd_bridge_register(dev, to_of_node(fwnode));
+ if (IS_ERR(alt_port->bridge)) {
fwnode_handle_put(fwnode);
- return ret;
+ return PTR_ERR(alt_port->bridge);
}
alt_port->dp_alt.svid = USB_TYPEC_DP_SID;
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 79bcd5bd4938..04c1b32a22c5 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -663,17 +663,54 @@ static int sm750fb_set_drv(struct lynxfb_par *par)
return ret;
}
-static struct fb_ops lynxfb_ops = {
+static const struct fb_ops lynxfb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = lynxfb_ops_check_var,
.fb_set_par = lynxfb_ops_set_par,
.fb_setcolreg = lynxfb_ops_setcolreg,
.fb_blank = lynxfb_ops_blank,
- .fb_fillrect = cfb_fillrect,
- .fb_imageblit = cfb_imageblit,
- .fb_copyarea = cfb_copyarea,
- /* cursor */
+ .fb_pan_display = lynxfb_ops_pan_display,
+};
+
+static const struct fb_ops lynxfb_ops_with_cursor = {
+ .owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
+ .fb_check_var = lynxfb_ops_check_var,
+ .fb_set_par = lynxfb_ops_set_par,
+ .fb_setcolreg = lynxfb_ops_setcolreg,
+ .fb_blank = lynxfb_ops_blank,
+ .fb_pan_display = lynxfb_ops_pan_display,
+ .fb_cursor = lynxfb_ops_cursor,
+};
+
+static const struct fb_ops lynxfb_ops_accel = {
+ .owner = THIS_MODULE,
+ __FB_DEFAULT_IOMEM_OPS_RDWR,
+ .fb_check_var = lynxfb_ops_check_var,
+ .fb_set_par = lynxfb_ops_set_par,
+ .fb_setcolreg = lynxfb_ops_setcolreg,
+ .fb_blank = lynxfb_ops_blank,
+ .fb_pan_display = lynxfb_ops_pan_display,
+ .fb_fillrect = lynxfb_ops_fillrect,
+ .fb_copyarea = lynxfb_ops_copyarea,
+ .fb_imageblit = lynxfb_ops_imageblit,
+ __FB_DEFAULT_IOMEM_OPS_MMAP,
+};
+
+static const struct fb_ops lynxfb_ops_accel_with_cursor = {
+ .owner = THIS_MODULE,
+ __FB_DEFAULT_IOMEM_OPS_RDWR,
+ .fb_check_var = lynxfb_ops_check_var,
+ .fb_set_par = lynxfb_ops_set_par,
+ .fb_setcolreg = lynxfb_ops_setcolreg,
+ .fb_blank = lynxfb_ops_blank,
+ .fb_pan_display = lynxfb_ops_pan_display,
+ .fb_fillrect = lynxfb_ops_fillrect,
+ .fb_copyarea = lynxfb_ops_copyarea,
+ .fb_imageblit = lynxfb_ops_imageblit,
.fb_cursor = lynxfb_ops_cursor,
+ __FB_DEFAULT_IOMEM_OPS_MMAP,
};
static int lynxfb_set_fbinfo(struct fb_info *info, int index)
@@ -714,7 +751,6 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
par->index = index;
output->channel = &crtc->channel;
sm750fb_set_drv(par);
- lynxfb_ops.fb_pan_display = lynxfb_ops_pan_display;
/*
* set current cursor variable and proc pointer,
@@ -731,19 +767,22 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
crtc->cursor.vstart = sm750_dev->pvMem + crtc->cursor.offset;
memset_io(crtc->cursor.vstart, 0, crtc->cursor.size);
- if (!g_hwcursor) {
- lynxfb_ops.fb_cursor = NULL;
+ if (!g_hwcursor)
sm750_hw_cursor_disable(&crtc->cursor);
- }
/* set info->fbops, must be set before fb_find_mode */
if (!sm750_dev->accel_off) {
/* use 2d acceleration */
- lynxfb_ops.fb_fillrect = lynxfb_ops_fillrect;
- lynxfb_ops.fb_copyarea = lynxfb_ops_copyarea;
- lynxfb_ops.fb_imageblit = lynxfb_ops_imageblit;
+ if (!g_hwcursor)
+ info->fbops = &lynxfb_ops_accel;
+ else
+ info->fbops = &lynxfb_ops_accel_with_cursor;
+ } else {
+ if (!g_hwcursor)
+ info->fbops = &lynxfb_ops;
+ else
+ info->fbops = &lynxfb_ops_with_cursor;
}
- info->fbops = &lynxfb_ops;
if (!g_fbmode[index]) {
g_fbmode[index] = g_def_fbmode;
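With the four const fb_ops tables above, the driver no longer patches function pointers in a shared mutable struct at probe time; it simply picks the right read-only table from two flags. The selection logic amounts to the following illustrative helper (pick_fbops() is not in the patch):

static const struct fb_ops *pick_fbops(bool accel, bool hwcursor)
{
	if (accel)
		return hwcursor ? &lynxfb_ops_accel_with_cursor
				: &lynxfb_ops_accel;

	return hwcursor ? &lynxfb_ops_with_cursor : &lynxfb_ops;
}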
diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
index 64f0e047c23d..4b1092127694 100644
--- a/drivers/tee/optee/device.c
+++ b/drivers/tee/optee/device.c
@@ -60,7 +60,16 @@ static void optee_release_device(struct device *dev)
kfree(optee_device);
}
-static int optee_register_device(const uuid_t *device_uuid)
+static ssize_t need_supplicant_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return 0;
+}
+
+static DEVICE_ATTR_RO(need_supplicant);
+
+static int optee_register_device(const uuid_t *device_uuid, u32 func)
{
struct tee_client_device *optee_device = NULL;
int rc;
@@ -83,6 +92,10 @@ static int optee_register_device(const uuid_t *device_uuid)
put_device(&optee_device->dev);
}
+ if (func == PTA_CMD_GET_DEVICES_SUPP)
+ device_create_file(&optee_device->dev,
+ &dev_attr_need_supplicant);
+
return rc;
}
@@ -142,7 +155,7 @@ static int __optee_enumerate_devices(u32 func)
num_devices = shm_size / sizeof(uuid_t);
for (idx = 0; idx < num_devices; idx++) {
- rc = optee_register_device(&device_uuid[idx]);
+ rc = optee_register_device(&device_uuid[idx], func);
if (rc)
goto out_shm;
}
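need_supplicant is a presence-only attribute: the show callback writes nothing, so userspace keys off whether the file exists rather than its contents. Declaring such an attribute follows the standard DEVICE_ATTR_RO() pattern (the demo names are illustrative):

static ssize_t demo_flag_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return 0;			/* empty file: existence is the signal */
}
static DEVICE_ATTR_RO(demo_flag);	/* expands to dev_attr_demo_flag */

/* at device setup time:
 *	device_create_file(dev, &dev_attr_demo_flag);
 */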
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index b94f567647cb..e6218766d0c8 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -777,6 +777,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = {
{ "INT33C5", (kernel_ulong_t)&dw8250_dw_apb },
{ "INT3434", (kernel_ulong_t)&dw8250_dw_apb },
{ "INT3435", (kernel_ulong_t)&dw8250_dw_apb },
+ { "INTC10EE", (kernel_ulong_t)&dw8250_dw_apb },
{ },
};
MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index 9837a27739fd..e3f482fd3de4 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -189,5 +189,6 @@ static int __init early_omap8250_setup(struct earlycon_device *device,
OF_EARLYCON_DECLARE(omap8250, "ti,omap2-uart", early_omap8250_setup);
OF_EARLYCON_DECLARE(omap8250, "ti,omap3-uart", early_omap8250_setup);
OF_EARLYCON_DECLARE(omap8250, "ti,omap4-uart", early_omap8250_setup);
+OF_EARLYCON_DECLARE(omap8250, "ti,am654-uart", early_omap8250_setup);
#endif
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 2d42f485c987..578f35895b27 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -933,7 +933,7 @@ static void __dma_rx_do_complete(struct uart_8250_port *p)
if (priv->habit & UART_HAS_RHR_IT_DIS) {
reg = serial_in(p, UART_OMAP_IER2);
reg &= ~UART_OMAP_IER2_RHR_IT_DIS;
- serial_out(p, UART_OMAP_IER2, UART_OMAP_IER2_RHR_IT_DIS);
+ serial_out(p, UART_OMAP_IER2, reg);
}
dmaengine_tx_status(rxchan, cookie, &state);
@@ -1079,7 +1079,7 @@ static int omap_8250_rx_dma(struct uart_8250_port *p)
if (priv->habit & UART_HAS_RHR_IT_DIS) {
reg = serial_in(p, UART_OMAP_IER2);
reg |= UART_OMAP_IER2_RHR_IT_DIS;
- serial_out(p, UART_OMAP_IER2, UART_OMAP_IER2_RHR_IT_DIS);
+ serial_out(p, UART_OMAP_IER2, reg);
}
dma_async_issue_pending(dma->rxchan);
@@ -1298,10 +1298,12 @@ static int omap_8250_dma_handle_irq(struct uart_port *port)
status = serial_port_in(port, UART_LSR);
- if (priv->habit & UART_HAS_EFR2)
- am654_8250_handle_rx_dma(up, iir, status);
- else
- status = omap_8250_handle_rx_dma(up, iir, status);
+ if ((iir & 0x3f) != UART_IIR_THRI) {
+ if (priv->habit & UART_HAS_EFR2)
+ am654_8250_handle_rx_dma(up, iir, status);
+ else
+ status = omap_8250_handle_rx_dma(up, iir, status);
+ }
serial8250_modem_status(up);
if (status & UART_LSR_THRE && up->dma->tx_err) {
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 61cc24cd90e4..b7635363373e 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -218,17 +218,18 @@ static struct vendor_data vendor_st = {
/* Deals with DMA transactions */
-struct pl011_sgbuf {
- struct scatterlist sg;
- char *buf;
+struct pl011_dmabuf {
+ dma_addr_t dma;
+ size_t len;
+ char *buf;
};
struct pl011_dmarx_data {
struct dma_chan *chan;
struct completion complete;
bool use_buf_b;
- struct pl011_sgbuf sgbuf_a;
- struct pl011_sgbuf sgbuf_b;
+ struct pl011_dmabuf dbuf_a;
+ struct pl011_dmabuf dbuf_b;
dma_cookie_t cookie;
bool running;
struct timer_list timer;
@@ -241,7 +242,8 @@ struct pl011_dmarx_data {
struct pl011_dmatx_data {
struct dma_chan *chan;
- struct scatterlist sg;
+ dma_addr_t dma;
+ size_t len;
char *buf;
bool queued;
};
@@ -366,32 +368,24 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
#define PL011_DMA_BUFFER_SIZE PAGE_SIZE
-static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
+static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db,
enum dma_data_direction dir)
{
- dma_addr_t dma_addr;
-
- sg->buf = dma_alloc_coherent(chan->device->dev,
- PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
- if (!sg->buf)
+ db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE,
+ &db->dma, GFP_KERNEL);
+ if (!db->buf)
return -ENOMEM;
-
- sg_init_table(&sg->sg, 1);
- sg_set_page(&sg->sg, phys_to_page(dma_addr),
- PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
- sg_dma_address(&sg->sg) = dma_addr;
- sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;
+ db->len = PL011_DMA_BUFFER_SIZE;
return 0;
}
-static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
+static void pl011_dmabuf_free(struct dma_chan *chan, struct pl011_dmabuf *db,
enum dma_data_direction dir)
{
- if (sg->buf) {
+ if (db->buf) {
dma_free_coherent(chan->device->dev,
- PL011_DMA_BUFFER_SIZE, sg->buf,
- sg_dma_address(&sg->sg));
+ PL011_DMA_BUFFER_SIZE, db->buf, db->dma);
}
}
@@ -552,8 +546,8 @@ static void pl011_dma_tx_callback(void *data)
uart_port_lock_irqsave(&uap->port, &flags);
if (uap->dmatx.queued)
- dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
- DMA_TO_DEVICE);
+ dma_unmap_single(dmatx->chan->device->dev, dmatx->dma,
+ dmatx->len, DMA_TO_DEVICE);
dmacr = uap->dmacr;
uap->dmacr = dmacr & ~UART011_TXDMAE;
@@ -639,18 +633,19 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
memcpy(&dmatx->buf[first], &xmit->buf[0], second);
}
- dmatx->sg.length = count;
-
- if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
+ dmatx->len = count;
+ dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count,
+ DMA_TO_DEVICE);
+ if (dmatx->dma == DMA_MAPPING_ERROR) {
uap->dmatx.queued = false;
dev_dbg(uap->port.dev, "unable to map TX DMA\n");
return -EBUSY;
}
- desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
+ desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
- dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
+ dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE);
uap->dmatx.queued = false;
/*
* If DMA cannot be used right now, we complete this
@@ -813,8 +808,8 @@ __acquires(&uap->port.lock)
dmaengine_terminate_async(uap->dmatx.chan);
if (uap->dmatx.queued) {
- dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
- DMA_TO_DEVICE);
+ dma_unmap_single(uap->dmatx.chan->device->dev, uap->dmatx.dma,
+ uap->dmatx.len, DMA_TO_DEVICE);
uap->dmatx.queued = false;
uap->dmacr &= ~UART011_TXDMAE;
pl011_write(uap->dmacr, uap, REG_DMACR);
@@ -828,15 +823,15 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
struct dma_chan *rxchan = uap->dmarx.chan;
struct pl011_dmarx_data *dmarx = &uap->dmarx;
struct dma_async_tx_descriptor *desc;
- struct pl011_sgbuf *sgbuf;
+ struct pl011_dmabuf *dbuf;
if (!rxchan)
return -EIO;
/* Start the RX DMA job */
- sgbuf = uap->dmarx.use_buf_b ?
- &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
- desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
+ dbuf = uap->dmarx.use_buf_b ?
+ &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
+ desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
/*
@@ -876,8 +871,8 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
bool readfifo)
{
struct tty_port *port = &uap->port.state->port;
- struct pl011_sgbuf *sgbuf = use_buf_b ?
- &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+ struct pl011_dmabuf *dbuf = use_buf_b ?
+ &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
int dma_count = 0;
u32 fifotaken = 0; /* only used for vdbg() */
@@ -886,7 +881,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
if (uap->dmarx.poll_rate) {
/* The data can be taken by polling */
- dmataken = sgbuf->sg.length - dmarx->last_residue;
+ dmataken = dbuf->len - dmarx->last_residue;
/* Recalculate the pending size */
if (pending >= dmataken)
pending -= dmataken;
@@ -900,7 +895,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
* Note that tty_insert_flip_buf() tries to take as many chars
* as it can.
*/
- dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
+ dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
pending);
uap->port.icount.rx += dma_count;
@@ -911,7 +906,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
/* Reset the last_residue for Rx DMA poll */
if (uap->dmarx.poll_rate)
- dmarx->last_residue = sgbuf->sg.length;
+ dmarx->last_residue = dbuf->len;
/*
* Only continue with trying to read the FIFO if all DMA chars have
@@ -946,8 +941,8 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
struct pl011_dmarx_data *dmarx = &uap->dmarx;
struct dma_chan *rxchan = dmarx->chan;
- struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
- &dmarx->sgbuf_b : &dmarx->sgbuf_a;
+ struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
+ &dmarx->dbuf_b : &dmarx->dbuf_a;
size_t pending;
struct dma_tx_state state;
enum dma_status dmastat;
@@ -969,7 +964,7 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap)
pl011_write(uap->dmacr, uap, REG_DMACR);
uap->dmarx.running = false;
- pending = sgbuf->sg.length - state.residue;
+ pending = dbuf->len - state.residue;
BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
/* Then we terminate the transfer - we now know our residue */
dmaengine_terminate_all(rxchan);
@@ -996,8 +991,8 @@ static void pl011_dma_rx_callback(void *data)
struct pl011_dmarx_data *dmarx = &uap->dmarx;
struct dma_chan *rxchan = dmarx->chan;
bool lastbuf = dmarx->use_buf_b;
- struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
- &dmarx->sgbuf_b : &dmarx->sgbuf_a;
+ struct pl011_dmabuf *dbuf = dmarx->use_buf_b ?
+ &dmarx->dbuf_b : &dmarx->dbuf_a;
size_t pending;
struct dma_tx_state state;
int ret;
@@ -1015,7 +1010,7 @@ static void pl011_dma_rx_callback(void *data)
* the DMA irq handler. So we check the residue here.
*/
rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
- pending = sgbuf->sg.length - state.residue;
+ pending = dbuf->len - state.residue;
BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
/* Then we terminate the transfer - we now know our residue */
dmaengine_terminate_all(rxchan);
@@ -1067,16 +1062,16 @@ static void pl011_dma_rx_poll(struct timer_list *t)
unsigned long flags;
unsigned int dmataken = 0;
unsigned int size = 0;
- struct pl011_sgbuf *sgbuf;
+ struct pl011_dmabuf *dbuf;
int dma_count;
struct dma_tx_state state;
- sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+ dbuf = dmarx->use_buf_b ? &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
if (likely(state.residue < dmarx->last_residue)) {
- dmataken = sgbuf->sg.length - dmarx->last_residue;
+ dmataken = dbuf->len - dmarx->last_residue;
size = dmarx->last_residue - state.residue;
- dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
+ dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
size);
if (dma_count == size)
dmarx->last_residue = state.residue;
@@ -1123,7 +1118,7 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
return;
}
- sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
+ uap->dmatx.len = PL011_DMA_BUFFER_SIZE;
/* The DMA buffer is now the FIFO the TTY subsystem can use */
uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
@@ -1133,7 +1128,7 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
goto skip_rx;
/* Allocate and map DMA RX buffers */
- ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+ ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a,
DMA_FROM_DEVICE);
if (ret) {
dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
@@ -1141,12 +1136,12 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
goto skip_rx;
}
- ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
+ ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b,
DMA_FROM_DEVICE);
if (ret) {
dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
"RX buffer B", ret);
- pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
+ pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a,
DMA_FROM_DEVICE);
goto skip_rx;
}
@@ -1200,8 +1195,9 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
/* In theory, this should already be done by pl011_dma_flush_buffer */
dmaengine_terminate_all(uap->dmatx.chan);
if (uap->dmatx.queued) {
- dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
- DMA_TO_DEVICE);
+ dma_unmap_single(uap->dmatx.chan->device->dev,
+ uap->dmatx.dma, uap->dmatx.len,
+ DMA_TO_DEVICE);
uap->dmatx.queued = false;
}
@@ -1212,8 +1208,8 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
if (uap->using_rx_dma) {
dmaengine_terminate_all(uap->dmarx.chan);
/* Clean up the RX DMA */
- pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
- pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
+ pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, DMA_FROM_DEVICE);
+ pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_b, DMA_FROM_DEVICE);
if (uap->dmarx.poll_rate)
del_timer_sync(&uap->dmarx.timer);
uap->using_rx_dma = false;
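The pl011 hunks above replace the scatterlist-backed pl011_sgbuf with a plain coherent buffer, which is why the residue arithmetic now reads dbuf->len instead of sgbuf->sg.length and TX teardown uses dma_unmap_single(). A minimal sketch of the replacement type and its init/free pair, assuming the field names implied by the hunks (dma/buf/len) and a coherent allocation sized to PL011_DMA_BUFFER_SIZE:

#include <linux/dma-mapping.h>

/* Sketch: one coherent DMA buffer replaces the old single-entry sglist. */
struct pl011_dmabuf {
	dma_addr_t	dma;	/* bus address handed to the DMA engine */
	void		*buf;	/* CPU-visible mapping */
	size_t		len;	/* replaces sgbuf->sg.length in residue math */
};

static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db,
			     enum dma_data_direction dir)
{
	db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE,
				     &db->dma, GFP_KERNEL);
	if (!db->buf)
		return -ENOMEM;
	db->len = PL011_DMA_BUFFER_SIZE;
	return 0;
}

static void pl011_dmabuf_free(struct dma_chan *chan, struct pl011_dmabuf *db,
			      enum dma_data_direction dir)
{
	dma_free_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE,
			  db->buf, db->dma);
}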
diff --git a/drivers/tty/serial/ma35d1_serial.c b/drivers/tty/serial/ma35d1_serial.c
index a6a7c405892e..21b574f78b86 100644
--- a/drivers/tty/serial/ma35d1_serial.c
+++ b/drivers/tty/serial/ma35d1_serial.c
@@ -552,11 +552,19 @@ static void ma35d1serial_console_putchar(struct uart_port *port, unsigned char c
*/
static void ma35d1serial_console_write(struct console *co, const char *s, u32 count)
{
- struct uart_ma35d1_port *up = &ma35d1serial_ports[co->index];
+ struct uart_ma35d1_port *up;
unsigned long flags;
int locked = 1;
u32 ier;
+ if ((co->index < 0) || (co->index >= MA35_UART_NR)) {
+ pr_warn("Failed to write on ononsole port %x, out of range\n",
+ co->index);
+ return;
+ }
+
+ up = &ma35d1serial_ports[co->index];
+
if (up->port.sysrq)
locked = 0;
else if (oops_in_progress)
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index db2bb1c0d36c..cf0c6120d30e 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -766,6 +766,18 @@ static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
case SC16IS7XX_IIR_RTOI_SRC:
case SC16IS7XX_IIR_XOFFI_SRC:
rxlen = sc16is7xx_port_read(port, SC16IS7XX_RXLVL_REG);
+
+ /*
+ * There is a silicon bug that makes the chip report a
+ * time-out interrupt but no data in the FIFO. This is
+ * described in errata section 18.1.4.
+ *
+ * When this happens, read one byte from the FIFO to
+ * clear the interrupt.
+ */
+ if (iir == SC16IS7XX_IIR_RTOI_SRC && !rxlen)
+ rxlen = 1;
+
if (rxlen)
sc16is7xx_handle_rx(port, rxlen, iir);
break;
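The workaround works because the spurious time-out interrupt is cleared by any FIFO read, so forcing rxlen to 1 makes the normal RX path perform exactly one read. A compact sketch of the decision, using the register accessors shown in the hunk (the standalone helper name below is hypothetical):

/* Sketch: errata 18.1.4 - RTOI asserted while the FIFO is empty. */
static unsigned int sc16is7xx_rx_pending(struct uart_port *port, u8 iir)
{
	unsigned int rxlen = sc16is7xx_port_read(port, SC16IS7XX_RXLVL_REG);

	/* Reading one byte drains nothing useful but clears the IRQ. */
	if (iir == SC16IS7XX_IIR_RTOI_SRC && !rxlen)
		rxlen = 1;

	return rxlen;
}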
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 8b1031fb0a44..bce0d2a9a7f3 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -6444,11 +6444,24 @@ static bool ufshcd_abort_one(struct request *rq, void *priv)
struct scsi_device *sdev = cmd->device;
struct Scsi_Host *shost = sdev->host;
struct ufs_hba *hba = shost_priv(shost);
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct ufs_hw_queue *hwq;
+ unsigned long flags;
*ret = ufshcd_try_to_abort_task(hba, tag);
dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
*ret ? "failed" : "succeeded");
+
+ /* Release cmd in MCQ mode if abort succeeds */
+ if (is_mcq_enabled(hba) && (*ret == 0)) {
+ hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
+ spin_lock_irqsave(&hwq->cq_lock, flags);
+ if (ufshcd_cmd_inflight(lrbp->cmd))
+ ufshcd_release_scsi_cmd(hba, lrbp);
+ spin_unlock_irqrestore(&hwq->cq_lock, flags);
+ }
+
return *ret == 0;
}
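In MCQ mode a command that was successfully aborted never appears on a completion queue, so nothing would ever release its tag; the hunk frees it by hand. Taking cq_lock closes the race with a late hardware completion of the same slot. A sketch of that release step, using only the helpers visible in the hunk (the wrapper name is hypothetical):

/* Sketch: release an aborted command that MCQ will never complete. */
static void release_aborted_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_hw_queue *hwq =
		ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
	unsigned long flags;

	spin_lock_irqsave(&hwq->cq_lock, flags);
	if (ufshcd_cmd_inflight(lrbp->cmd))	/* not already completed */
		ufshcd_release_scsi_cmd(hba, lrbp);
	spin_unlock_irqrestore(&hwq->cq_lock, flags);
}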
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index ea85e2c701a1..3c8a9dd585c0 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -92,6 +92,7 @@ static void hidg_release(struct device *dev)
{
struct f_hidg *hidg = container_of(dev, struct f_hidg, dev);
+ kfree(hidg->report_desc);
kfree(hidg->set_report_buf);
kfree(hidg);
}
@@ -1287,9 +1288,9 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi)
hidg->report_length = opts->report_length;
hidg->report_desc_length = opts->report_desc_length;
if (opts->report_desc) {
- hidg->report_desc = devm_kmemdup(&hidg->dev, opts->report_desc,
- opts->report_desc_length,
- GFP_KERNEL);
+ hidg->report_desc = kmemdup(opts->report_desc,
+ opts->report_desc_length,
+ GFP_KERNEL);
if (!hidg->report_desc) {
ret = -ENOMEM;
goto err_put_device;
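devm_kmemdup() ties the copy's lifetime to devres, but hidg and its embedded struct device live until the last reference is dropped, so the descriptor could be freed while hidg still points at it. Duplicating with plain kmemdup() and freeing in hidg_release() (the .release callback extended in the first hunk) keeps allocation and teardown on the same refcount. The general pattern, sketched with a hypothetical object:

#include <linux/device.h>
#include <linux/slab.h>

/* Sketch: buffer lifetime follows the refcounted device, not devres. */
struct obj {
	struct device dev;
	void *buf;		/* kmemdup()'d in the allocation path */
};

static void obj_release(struct device *dev)
{
	struct obj *o = container_of(dev, struct obj, dev);

	kfree(o->buf);		/* freed with the final put_device() */
	kfree(o);
}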
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index ded9531f141b..d59f94464b87 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1646,8 +1646,6 @@ static void gadget_unbind_driver(struct device *dev)
dev_dbg(&udc->dev, "unbinding gadget driver [%s]\n", driver->function);
- kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
-
udc->allow_connect = false;
cancel_work_sync(&udc->vbus_work);
mutex_lock(&udc->connect_lock);
@@ -1667,6 +1665,8 @@ static void gadget_unbind_driver(struct device *dev)
driver->is_bound = false;
udc->driver = NULL;
mutex_unlock(&udc_lock);
+
+ kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
}
/* ------------------------------------------------------------------------- */
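The KOBJ_CHANGE uevent is how userspace learns that the gadget's state changed; firing it before unbind meant a listener could read the sysfs state while ->unbind was still running. Moving it after udc->driver is cleared under udc_lock guarantees the observed state is "no driver bound". The ordering, reduced to a sketch (the helper name is hypothetical):

/* Sketch: notify userspace only once the unbind is fully visible. */
static void gadget_unbind_tail(struct usb_udc *udc,
			       struct usb_gadget_driver *driver)
{
	mutex_lock(&udc_lock);
	driver->is_bound = false;
	udc->driver = NULL;
	mutex_unlock(&udc_lock);

	kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);	/* now, not before */
}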
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 95ed9404f6f8..d6fc08e5db8f 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -535,8 +535,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
/* xHC spec requires PCI devices to support D3hot and D3cold */
if (xhci->hci_version >= 0x120)
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
- else if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version >= 0x110)
- xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
if (xhci->quirks & XHCI_RESET_ON_RESUME)
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 2e0451bd336e..16a670828dde 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -267,7 +267,7 @@ static void typec_altmode_put_partner(struct altmode *altmode)
if (!partner)
return;
- adev = &partner->adev;
+ adev = &altmode->adev;
if (is_typec_plug(adev->dev.parent)) {
struct typec_plug *plug = to_typec_plug(adev->dev.parent);
@@ -497,7 +497,8 @@ static void typec_altmode_release(struct device *dev)
{
struct altmode *alt = to_altmode(to_typec_altmode(dev));
- typec_altmode_put_partner(alt);
+ if (!is_typec_port(dev->parent))
+ typec_altmode_put_partner(alt);
altmode_id_remove(alt->adev.dev.parent, alt->id);
kfree(alt);
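Two related fixes here: the reference dropped in typec_altmode_put_partner() must be the one taken on this altmode's own device (partner->adev names the partner's altmode, so the old code put the wrong device), and port altmodes have no partner at all, so the release path now skips the call for them. A sketch of the corrected shape; the plug/partner bookkeeping lines are reconstructed from the surrounding context:

static void typec_altmode_put_partner(struct altmode *altmode)
{
	struct altmode *partner = altmode->partner;
	struct typec_altmode *adev;

	if (!partner)
		return;

	adev = &altmode->adev;	/* our own device, not partner->adev */

	if (is_typec_plug(adev->dev.parent)) {
		struct typec_plug *plug = to_typec_plug(adev->dev.parent);

		partner->plug[plug->index] = NULL;
	} else {
		partner->partner = NULL;
	}
	put_device(&adev->dev);
}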
diff --git a/drivers/usb/typec/mux/Kconfig b/drivers/usb/typec/mux/Kconfig
index 816b9bd08355..5120942f309d 100644
--- a/drivers/usb/typec/mux/Kconfig
+++ b/drivers/usb/typec/mux/Kconfig
@@ -40,7 +40,7 @@ config TYPEC_MUX_NB7VPQ904M
tristate "On Semiconductor NB7VPQ904M Type-C redriver driver"
depends on I2C
depends on DRM || DRM=n
- select DRM_PANEL_BRIDGE if DRM
+ select DRM_AUX_BRIDGE if DRM_BRIDGE
select REGMAP_I2C
help
Say Y or M if your system has an On Semiconductor NB7VPQ904M Type-C
diff --git a/drivers/usb/typec/mux/nb7vpq904m.c b/drivers/usb/typec/mux/nb7vpq904m.c
index cda206cf0c38..b17826713753 100644
--- a/drivers/usb/typec/mux/nb7vpq904m.c
+++ b/drivers/usb/typec/mux/nb7vpq904m.c
@@ -11,7 +11,7 @@
#include <linux/regmap.h>
#include <linux/bitfield.h>
#include <linux/of_graph.h>
-#include <drm/drm_bridge.h>
+#include <drm/bridge/aux-bridge.h>
#include <linux/usb/typec_dp.h>
#include <linux/usb/typec_mux.h>
#include <linux/usb/typec_retimer.h>
@@ -70,8 +70,6 @@ struct nb7vpq904m {
bool swap_data_lanes;
struct typec_switch *typec_switch;
- struct drm_bridge bridge;
-
struct mutex lock; /* protect non-concurrent retimer & switch */
enum typec_orientation orientation;
@@ -297,44 +295,6 @@ static int nb7vpq904m_retimer_set(struct typec_retimer *retimer, struct typec_re
return ret;
}
-#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE)
-static int nb7vpq904m_bridge_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
-{
- struct nb7vpq904m *nb7 = container_of(bridge, struct nb7vpq904m, bridge);
- struct drm_bridge *next_bridge;
-
- if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
- return -EINVAL;
-
- next_bridge = devm_drm_of_get_bridge(&nb7->client->dev, nb7->client->dev.of_node, 0, 0);
- if (IS_ERR(next_bridge)) {
- dev_err(&nb7->client->dev, "failed to acquire drm_bridge: %pe\n", next_bridge);
- return PTR_ERR(next_bridge);
- }
-
- return drm_bridge_attach(bridge->encoder, next_bridge, bridge,
- DRM_BRIDGE_ATTACH_NO_CONNECTOR);
-}
-
-static const struct drm_bridge_funcs nb7vpq904m_bridge_funcs = {
- .attach = nb7vpq904m_bridge_attach,
-};
-
-static int nb7vpq904m_register_bridge(struct nb7vpq904m *nb7)
-{
- nb7->bridge.funcs = &nb7vpq904m_bridge_funcs;
- nb7->bridge.of_node = nb7->client->dev.of_node;
-
- return devm_drm_bridge_add(&nb7->client->dev, &nb7->bridge);
-}
-#else
-static int nb7vpq904m_register_bridge(struct nb7vpq904m *nb7)
-{
- return 0;
-}
-#endif
-
static const struct regmap_config nb7_regmap = {
.max_register = 0x1f,
.reg_bits = 8,
@@ -461,7 +421,7 @@ static int nb7vpq904m_probe(struct i2c_client *client)
gpiod_set_value(nb7->enable_gpio, 1);
- ret = nb7vpq904m_register_bridge(nb7);
+ ret = drm_aux_bridge_register(dev);
if (ret)
goto err_disable_gpio;
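Each Type-C mux/retimer used to carry dozens of lines of identical drm_bridge boilerplate whose only job was to forward the attach to the next bridge in the OF graph. The shared aux-bridge helper hides that: probe registers it once, and the DRM_AUX_BRIDGE Kconfig dependency replaces DRM_PANEL_BRIDGE. Roughly what a converted probe keeps, assuming drm_aux_bridge_register() behaves as the hunk implies (a transparent bridge bound to dev->of_node):

#include <drm/bridge/aux-bridge.h>

/* Sketch: one call replaces the embedded bridge and attach callback. */
static int mux_probe_tail(struct device *dev)
{
	int ret;

	ret = drm_aux_bridge_register(dev);
	if (ret)
		return ret;

	return 0;
}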
diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig
index 0b2993fef564..64d5421c69e6 100644
--- a/drivers/usb/typec/tcpm/Kconfig
+++ b/drivers/usb/typec/tcpm/Kconfig
@@ -80,6 +80,7 @@ config TYPEC_QCOM_PMIC
tristate "Qualcomm PMIC USB Type-C Port Controller Manager driver"
depends on ARCH_QCOM || COMPILE_TEST
depends on DRM || DRM=n
+ select DRM_AUX_HPD_BRIDGE if DRM_BRIDGE
help
A Type-C port and Power Delivery driver which aggregates two
discrete pieces of silicon in the PM8150b PMIC block: the
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
index 581199d37b49..1a2b4bddaa97 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
@@ -18,7 +18,7 @@
#include <linux/usb/tcpm.h>
#include <linux/usb/typec_mux.h>
-#include <drm/drm_bridge.h>
+#include <drm/bridge/aux-bridge.h>
#include "qcom_pmic_typec_pdphy.h"
#include "qcom_pmic_typec_port.h"
@@ -36,7 +36,6 @@ struct pmic_typec {
struct pmic_typec_port *pmic_typec_port;
bool vbus_enabled;
struct mutex lock; /* VBUS state serialization */
- struct drm_bridge bridge;
};
#define tcpc_to_tcpm(_tcpc_) container_of(_tcpc_, struct pmic_typec, tcpc)
@@ -150,35 +149,6 @@ static int qcom_pmic_typec_init(struct tcpc_dev *tcpc)
return 0;
}
-#if IS_ENABLED(CONFIG_DRM)
-static int qcom_pmic_typec_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
-{
- return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
-}
-
-static const struct drm_bridge_funcs qcom_pmic_typec_bridge_funcs = {
- .attach = qcom_pmic_typec_attach,
-};
-
-static int qcom_pmic_typec_init_drm(struct pmic_typec *tcpm)
-{
- tcpm->bridge.funcs = &qcom_pmic_typec_bridge_funcs;
-#ifdef CONFIG_OF
- tcpm->bridge.of_node = of_get_child_by_name(tcpm->dev->of_node, "connector");
-#endif
- tcpm->bridge.ops = DRM_BRIDGE_OP_HPD;
- tcpm->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
-
- return devm_drm_bridge_add(tcpm->dev, &tcpm->bridge);
-}
-#else
-static int qcom_pmic_typec_init_drm(struct pmic_typec *tcpm)
-{
- return 0;
-}
-#endif
-
static int qcom_pmic_typec_probe(struct platform_device *pdev)
{
struct pmic_typec *tcpm;
@@ -186,6 +156,7 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
const struct pmic_typec_resources *res;
struct regmap *regmap;
+ struct device *bridge_dev;
u32 base[2];
int ret;
@@ -241,14 +212,14 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
mutex_init(&tcpm->lock);
platform_set_drvdata(pdev, tcpm);
- ret = qcom_pmic_typec_init_drm(tcpm);
- if (ret)
- return ret;
-
tcpm->tcpc.fwnode = device_get_named_child_node(tcpm->dev, "connector");
if (!tcpm->tcpc.fwnode)
return -EINVAL;
+ bridge_dev = drm_dp_hpd_bridge_register(tcpm->dev, to_of_node(tcpm->tcpc.fwnode));
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
tcpm->tcpm_port = tcpm_register_port(tcpm->dev, &tcpm->tcpc);
if (IS_ERR(tcpm->tcpm_port)) {
ret = PTR_ERR(tcpm->tcpm_port);
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 12ac3397f39b..26ba7da6b410 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -2815,13 +2815,18 @@ static int setup_cvq_vring(struct mlx5_vdpa_dev *mvdev)
struct mlx5_control_vq *cvq = &mvdev->cvq;
int err = 0;
- if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
+ if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) {
+ u16 idx = cvq->vring.last_avail_idx;
+
err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features,
MLX5_CVQ_MAX_ENT, false,
(struct vring_desc *)(uintptr_t)cvq->desc_addr,
(struct vring_avail *)(uintptr_t)cvq->driver_addr,
(struct vring_used *)(uintptr_t)cvq->device_addr);
+ if (!err)
+ cvq->vring.last_avail_idx = cvq->vring.last_used_idx = idx;
+ }
return err;
}
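vringh_init_iotlb() resets the vring bookkeeping to zero, so re-creating the control VQ (reset/resume paths) would rewind last_avail_idx and make the driver re-process control descriptors the device already consumed. The hunk saves the index across the call. The pattern in isolation, as a standalone sketch with simplified types:

#include <stdint.h>
#include <stdio.h>

struct vring_state { uint16_t last_avail_idx, last_used_idx; };

static int init_zeroes(struct vring_state *v)
{
	v->last_avail_idx = v->last_used_idx = 0;	/* what re-init does */
	return 0;
}

int main(void)
{
	struct vring_state v = { .last_avail_idx = 7, .last_used_idx = 7 };
	uint16_t idx = v.last_avail_idx;	/* save before re-init */

	if (!init_zeroes(&v))
		v.last_avail_idx = v.last_used_idx = idx;	/* restore */

	printf("resumed at index %u\n", v.last_avail_idx);
	return 0;
}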
diff --git a/drivers/vdpa/pds/debugfs.c b/drivers/vdpa/pds/debugfs.c
index 9b04aad6ec35..c328e694f6e7 100644
--- a/drivers/vdpa/pds/debugfs.c
+++ b/drivers/vdpa/pds/debugfs.c
@@ -261,7 +261,7 @@ void pds_vdpa_debugfs_add_vdpadev(struct pds_vdpa_aux *vdpa_aux)
debugfs_create_file("config", 0400, vdpa_aux->dentry, vdpa_aux->pdsv, &config_fops);
for (i = 0; i < vdpa_aux->pdsv->num_vqs; i++) {
- char name[8];
+ char name[16];
snprintf(name, sizeof(name), "vq%02d", i);
debugfs_create_file(name, 0400, vdpa_aux->dentry,
diff --git a/drivers/vdpa/pds/vdpa_dev.c b/drivers/vdpa/pds/vdpa_dev.c
index 52b2449182ad..25c0fe5ec3d5 100644
--- a/drivers/vdpa/pds/vdpa_dev.c
+++ b/drivers/vdpa/pds/vdpa_dev.c
@@ -318,9 +318,8 @@ static int pds_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 featur
return -EOPNOTSUPP;
}
- pdsv->negotiated_features = nego_features;
-
driver_features = pds_vdpa_get_driver_features(vdpa_dev);
+ pdsv->negotiated_features = nego_features;
dev_dbg(dev, "%s: %#llx => %#llx\n",
__func__, driver_features, nego_features);
@@ -461,8 +460,10 @@ static void pds_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
pds_vdpa_cmd_set_status(pdsv, status);
- /* Note: still working with FW on the need for this reset cmd */
if (status == 0) {
+ struct vdpa_callback null_cb = { };
+
+ pds_vdpa_set_config_cb(vdpa_dev, &null_cb);
pds_vdpa_cmd_reset(pdsv);
for (i = 0; i < pdsv->num_vqs; i++) {
diff --git a/drivers/vfio/pci/pds/pci_drv.c b/drivers/vfio/pci/pds/pci_drv.c
index dd8c00c895a2..a34dda516629 100644
--- a/drivers/vfio/pci/pds/pci_drv.c
+++ b/drivers/vfio/pci/pds/pci_drv.c
@@ -55,10 +55,10 @@ static void pds_vfio_recovery(struct pds_vfio_pci_device *pds_vfio)
* VFIO_DEVICE_STATE_RUNNING.
*/
if (deferred_reset_needed) {
- spin_lock(&pds_vfio->reset_lock);
+ mutex_lock(&pds_vfio->reset_mutex);
pds_vfio->deferred_reset = true;
pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_ERROR;
- spin_unlock(&pds_vfio->reset_lock);
+ mutex_unlock(&pds_vfio->reset_mutex);
}
}
diff --git a/drivers/vfio/pci/pds/vfio_dev.c b/drivers/vfio/pci/pds/vfio_dev.c
index 649b18ee394b..4c351c59d05a 100644
--- a/drivers/vfio/pci/pds/vfio_dev.c
+++ b/drivers/vfio/pci/pds/vfio_dev.c
@@ -29,7 +29,7 @@ struct pds_vfio_pci_device *pds_vfio_pci_drvdata(struct pci_dev *pdev)
void pds_vfio_state_mutex_unlock(struct pds_vfio_pci_device *pds_vfio)
{
again:
- spin_lock(&pds_vfio->reset_lock);
+ mutex_lock(&pds_vfio->reset_mutex);
if (pds_vfio->deferred_reset) {
pds_vfio->deferred_reset = false;
if (pds_vfio->state == VFIO_DEVICE_STATE_ERROR) {
@@ -39,23 +39,23 @@ again:
}
pds_vfio->state = pds_vfio->deferred_reset_state;
pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
- spin_unlock(&pds_vfio->reset_lock);
+ mutex_unlock(&pds_vfio->reset_mutex);
goto again;
}
mutex_unlock(&pds_vfio->state_mutex);
- spin_unlock(&pds_vfio->reset_lock);
+ mutex_unlock(&pds_vfio->reset_mutex);
}
void pds_vfio_reset(struct pds_vfio_pci_device *pds_vfio)
{
- spin_lock(&pds_vfio->reset_lock);
+ mutex_lock(&pds_vfio->reset_mutex);
pds_vfio->deferred_reset = true;
pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
if (!mutex_trylock(&pds_vfio->state_mutex)) {
- spin_unlock(&pds_vfio->reset_lock);
+ mutex_unlock(&pds_vfio->reset_mutex);
return;
}
- spin_unlock(&pds_vfio->reset_lock);
+ mutex_unlock(&pds_vfio->reset_mutex);
pds_vfio_state_mutex_unlock(pds_vfio);
}
@@ -155,6 +155,9 @@ static int pds_vfio_init_device(struct vfio_device *vdev)
pds_vfio->vf_id = vf_id;
+ mutex_init(&pds_vfio->state_mutex);
+ mutex_init(&pds_vfio->reset_mutex);
+
vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P;
vdev->mig_ops = &pds_vfio_lm_ops;
vdev->log_ops = &pds_vfio_log_ops;
@@ -168,6 +171,17 @@ static int pds_vfio_init_device(struct vfio_device *vdev)
return 0;
}
+static void pds_vfio_release_device(struct vfio_device *vdev)
+{
+ struct pds_vfio_pci_device *pds_vfio =
+ container_of(vdev, struct pds_vfio_pci_device,
+ vfio_coredev.vdev);
+
+ mutex_destroy(&pds_vfio->state_mutex);
+ mutex_destroy(&pds_vfio->reset_mutex);
+ vfio_pci_core_release_dev(vdev);
+}
+
static int pds_vfio_open_device(struct vfio_device *vdev)
{
struct pds_vfio_pci_device *pds_vfio =
@@ -179,7 +193,6 @@ static int pds_vfio_open_device(struct vfio_device *vdev)
if (err)
return err;
- mutex_init(&pds_vfio->state_mutex);
pds_vfio->state = VFIO_DEVICE_STATE_RUNNING;
pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
@@ -199,14 +212,13 @@ static void pds_vfio_close_device(struct vfio_device *vdev)
pds_vfio_put_save_file(pds_vfio);
pds_vfio_dirty_disable(pds_vfio, true);
mutex_unlock(&pds_vfio->state_mutex);
- mutex_destroy(&pds_vfio->state_mutex);
vfio_pci_core_close_device(vdev);
}
static const struct vfio_device_ops pds_vfio_ops = {
.name = "pds-vfio",
.init = pds_vfio_init_device,
- .release = vfio_pci_core_release_dev,
+ .release = pds_vfio_release_device,
.open_device = pds_vfio_open_device,
.close_device = pds_vfio_close_device,
.ioctl = vfio_pci_core_ioctl,
diff --git a/drivers/vfio/pci/pds/vfio_dev.h b/drivers/vfio/pci/pds/vfio_dev.h
index b8f2d667608f..e7b01080a1ec 100644
--- a/drivers/vfio/pci/pds/vfio_dev.h
+++ b/drivers/vfio/pci/pds/vfio_dev.h
@@ -18,7 +18,7 @@ struct pds_vfio_pci_device {
struct pds_vfio_dirty dirty;
struct mutex state_mutex; /* protect migration state */
enum vfio_device_mig_state state;
- spinlock_t reset_lock; /* protect reset_done flow */
+ struct mutex reset_mutex; /* protect reset_done flow */
u8 deferred_reset;
enum vfio_device_mig_state deferred_reset_state;
struct notifier_block nb;
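The reset_done bookkeeping moves from a spinlock to a mutex because the sections it protects may now sleep (they nest with state_mutex, itself a mutex). The trylock in the reset path is what breaks the lock-order cycle: if someone already owns the state machine, the reset only records its request and lets the owner replay it on unlock. The shape of that handshake, reduced to a sketch (names follow the hunks):

/* Sketch: deferred-reset handshake between two mutexes. */
static void request_reset(struct pds_vfio_pci_device *pv)
{
	mutex_lock(&pv->reset_mutex);
	pv->deferred_reset = true;
	if (!mutex_trylock(&pv->state_mutex)) {
		/* Current owner will see deferred_reset on unlock. */
		mutex_unlock(&pv->reset_mutex);
		return;
	}
	mutex_unlock(&pv->reset_mutex);
	pds_vfio_state_mutex_unlock(pv);	/* replays the deferred reset */
}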
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 53693c826ebd..d5909a9206ff 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -146,10 +146,8 @@ config FB_ACORN
config FB_CLPS711X
tristate "CLPS711X LCD support"
depends on FB && (ARCH_CLPS711X || COMPILE_TEST)
+ select FB_IOMEM_HELPERS
select FB_MODE_HELPERS
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
select LCD_CLASS_DEVICE
select VIDEOMODE_HELPERS
help
@@ -180,10 +178,7 @@ config FB_IMX
config FB_CYBER2000
tristate "CyberPro 2000/2010/5000 support"
depends on FB && PCI && (BROKEN || !SPARC64)
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- select FB_IOMEM_FOPS
+ select FB_IOMEM_HELPERS
select VIDEO_NOMODESET
help
This enables support for the Integraphics CyberPro 20x0 and 5000
@@ -272,10 +267,7 @@ config FB_FM2
config FB_ARC
tristate "Arc Monochrome LCD board support"
depends on FB && (X86 || COMPILE_TEST)
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
+ select FB_SYSMEM_HELPERS_DEFERRED
help
This enables support for the Arc Monochrome LCD board. The board
is based on the KS-108 lcd controller and is typically a matrix
@@ -1460,10 +1452,7 @@ config FB_AU1100
config FB_AU1200
bool "Au1200/Au1300 LCD Driver"
depends on (FB = y) && MIPS_ALCHEMY
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
+ select FB_DMAMEM_HELPERS
help
This is the framebuffer driver for the Au1200/Au1300 SOCs.
It can drive various panels and CRTs by passing in kernel cmd line
@@ -1475,6 +1464,7 @@ config FB_VT8500
select FB_SYS_FILLRECT if (!FB_WMT_GE_ROPS)
select FB_SYS_COPYAREA if (!FB_WMT_GE_ROPS)
select FB_SYS_IMAGEBLIT
+ select FB_SYSMEM_FOPS
select FB_MODE_HELPERS
select VIDEOMODE_HELPERS
help
@@ -1487,6 +1477,7 @@ config FB_WM8505
select FB_SYS_FILLRECT if (!FB_WMT_GE_ROPS)
select FB_SYS_COPYAREA if (!FB_WMT_GE_ROPS)
select FB_SYS_IMAGEBLIT
+ select FB_SYSMEM_FOPS
select FB_MODE_HELPERS
select VIDEOMODE_HELPERS
help
@@ -1637,12 +1628,9 @@ config FB_SH_MOBILE_LCDC
depends on FB && HAVE_CLK && HAS_IOMEM
depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
depends on FB_DEVICE
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
- select FB_DEFERRED_IO
select FB_BACKLIGHT
+ select FB_DEFERRED_IO
+ select FB_DMAMEM_HELPERS
help
Frame buffer driver for the on-chip SH-Mobile LCD controller.
@@ -1689,11 +1677,7 @@ config FB_SMSCUFX
tristate "SMSC UFX6000/7000 USB Framebuffer support"
depends on FB && USB
select FB_MODE_HELPERS
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
- select FB_DEFERRED_IO
+ select FB_SYSMEM_HELPERS_DEFERRED
help
This is a kernel framebuffer driver for SMSC UFX USB devices.
Supports fbdev clients like xf86-video-fbdev, kdrive, fbi, and
@@ -1706,11 +1690,7 @@ config FB_UDL
depends on FB && USB
depends on FB_DEVICE
select FB_MODE_HELPERS
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
- select FB_DEFERRED_IO
+ select FB_SYSMEM_HELPERS_DEFERRED
help
This is a kernel framebuffer driver for DisplayLink USB devices.
Supports fbdev clients like xf86-video-fbdev, kdrive, fbi, and
@@ -1732,10 +1712,7 @@ config FB_IBM_GXT4500
config FB_PS3
tristate "PS3 GPU framebuffer driver"
depends on FB && PS3_PS3AV
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
+ select FB_SYSMEM_HELPERS
help
Include support for the virtual frame buffer in the PS3 platform.
@@ -1800,10 +1777,7 @@ config FB_DA8XX
config FB_VIRTUAL
tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)"
depends on FB
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
+ select FB_SYSMEM_HELPERS
help
This is a `virtual' frame buffer device. It operates on a chunk of
unswappable kernel memory instead of on the memory of a graphics
diff --git a/drivers/video/fbdev/acornfb.c b/drivers/video/fbdev/acornfb.c
index 163d2c9f951c..f0600f6ca254 100644
--- a/drivers/video/fbdev/acornfb.c
+++ b/drivers/video/fbdev/acornfb.c
@@ -605,7 +605,7 @@ acornfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
static const struct fb_ops acornfb_ops = {
.owner = THIS_MODULE,
- FB_IOMEM_DEFAULT_OPS,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = acornfb_check_var,
.fb_set_par = acornfb_set_par,
.fb_setcolreg = acornfb_setcolreg,
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 0399db369e70..47d373f04f3f 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -829,6 +829,8 @@ static int clcdfb_of_dma_setup(struct clcd_fb *fb)
static int clcdfb_of_dma_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
{
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
return dma_mmap_wc(&fb->dev->dev, vma, fb->fb.screen_base,
fb->fb.fix.smem_start, fb->fb.fix.smem_len);
}
diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
index cff11cb04a55..b2408543277c 100644
--- a/drivers/video/fbdev/arcfb.c
+++ b/drivers/video/fbdev/arcfb.c
@@ -363,39 +363,6 @@ static void arcfb_lcd_update(struct arcfb_par *par, unsigned int dx,
}
}
-static void arcfb_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect)
-{
- struct arcfb_par *par = info->par;
-
- sys_fillrect(info, rect);
-
- /* update the physical lcd */
- arcfb_lcd_update(par, rect->dx, rect->dy, rect->width, rect->height);
-}
-
-static void arcfb_copyarea(struct fb_info *info,
- const struct fb_copyarea *area)
-{
- struct arcfb_par *par = info->par;
-
- sys_copyarea(info, area);
-
- /* update the physical lcd */
- arcfb_lcd_update(par, area->dx, area->dy, area->width, area->height);
-}
-
-static void arcfb_imageblit(struct fb_info *info, const struct fb_image *image)
-{
- struct arcfb_par *par = info->par;
-
- sys_imageblit(info, image);
-
- /* update the physical lcd */
- arcfb_lcd_update(par, image->dx, image->dy, image->width,
- image->height);
-}
-
static int arcfb_ioctl(struct fb_info *info,
unsigned int cmd, unsigned long arg)
{
@@ -436,76 +403,48 @@ static int arcfb_ioctl(struct fb_info *info,
}
}
-/*
- * this is the access path from userspace. they can seek and write to
- * the fb. it's inefficient for them to do anything less than 64*8
- * writes since we update the lcd in each write() anyway.
- */
-static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
- size_t count, loff_t *ppos)
+static void arcfb_damage_range(struct fb_info *info, off_t off, size_t len)
{
- /* modded from epson 1355 */
-
- unsigned long p;
- int err;
- unsigned int fbmemlength,x,y,w,h, bitppos, startpos, endpos, bitcount;
- struct arcfb_par *par;
- unsigned int xres;
-
- if (!info->screen_buffer)
- return -ENODEV;
-
- p = *ppos;
- par = info->par;
- xres = info->var.xres;
- fbmemlength = (xres * info->var.yres)/8;
-
- if (p > fbmemlength)
- return -ENOSPC;
-
- err = 0;
- if ((count + p) > fbmemlength) {
- count = fbmemlength - p;
- err = -ENOSPC;
- }
-
- if (count) {
- char *base_addr;
-
- base_addr = info->screen_buffer;
- count -= copy_from_user(base_addr + p, buf, count);
- *ppos += count;
- err = -EFAULT;
- }
-
+ struct arcfb_par *par = info->par;
+ unsigned int xres = info->var.xres;
+ unsigned int bitppos, startpos, endpos, bitcount;
+ unsigned int x, y, width, height;
- bitppos = p*8;
+ bitppos = off * 8;
startpos = floorXres(bitppos, xres);
- endpos = ceilXres((bitppos + (count*8)), xres);
+ endpos = ceilXres((bitppos + (len * 8)), xres);
bitcount = endpos - startpos;
x = startpos % xres;
y = startpos / xres;
- w = xres;
- h = bitcount / xres;
- arcfb_lcd_update(par, x, y, w, h);
+ width = xres;
+ height = bitcount / xres;
+
+ arcfb_lcd_update(par, x, y, width, height);
+}
- if (count)
- return count;
- return err;
+static void arcfb_damage_area(struct fb_info *info, u32 x, u32 y,
+ u32 width, u32 height)
+{
+ struct arcfb_par *par = info->par;
+
+ /* update the physical lcd */
+ arcfb_lcd_update(par, x, y, width, height);
}
+FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(arcfb,
+ arcfb_damage_range,
+ arcfb_damage_area)
+
static const struct fb_ops arcfb_ops = {
.owner = THIS_MODULE,
.fb_open = arcfb_open,
- .fb_read = fb_sys_read,
- .fb_write = arcfb_write,
+ __FB_DEFAULT_DEFERRED_OPS_RDWR(arcfb),
.fb_release = arcfb_release,
.fb_pan_display = arcfb_pan_display,
- .fb_fillrect = arcfb_fillrect,
- .fb_copyarea = arcfb_copyarea,
- .fb_imageblit = arcfb_imageblit,
+ __FB_DEFAULT_DEFERRED_OPS_DRAW(arcfb),
.fb_ioctl = arcfb_ioctl,
+ // .fb_mmap requires deferred I/O
};
static int arcfb_probe(struct platform_device *dev)
@@ -529,6 +468,7 @@ static int arcfb_probe(struct platform_device *dev)
if (!info)
goto err_fb_alloc;
+ info->flags |= FBINFO_VIRTFB;
info->screen_buffer = videomemory;
info->fbops = &arcfb_ops;
diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c
index a9c8d33a6ef7..08109ce535cd 100644
--- a/drivers/video/fbdev/au1100fb.c
+++ b/drivers/video/fbdev/au1100fb.c
@@ -342,6 +342,8 @@ int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
{
struct au1100fb_device *fbdev = to_au1100fb_device(fbi);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6
return dma_mmap_coherent(fbdev->dev, vma, fbdev->fb_mem, fbdev->fb_phys,
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index 98afd385c49c..6f20efc663d7 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -1236,6 +1236,8 @@ static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct au1200fb_device *fbdev = info->par;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
return dma_mmap_coherent(fbdev->dev, vma,
fbdev->fb_mem, fbdev->fb_phys, fbdev->fb_len);
}
@@ -1488,15 +1490,12 @@ static int au1200fb_ioctl(struct fb_info *info, unsigned int cmd,
static const struct fb_ops au1200fb_fb_ops = {
.owner = THIS_MODULE,
+ __FB_DEFAULT_DMAMEM_OPS_RDWR,
.fb_check_var = au1200fb_fb_check_var,
.fb_set_par = au1200fb_fb_set_par,
.fb_setcolreg = au1200fb_fb_setcolreg,
.fb_blank = au1200fb_fb_blank,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
+ __FB_DEFAULT_DMAMEM_OPS_DRAW,
.fb_sync = NULL,
.fb_ioctl = au1200fb_ioctl,
.fb_mmap = au1200fb_fb_mmap,
@@ -1568,6 +1567,8 @@ static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev)
fbi->fix.mmio_len = 0;
fbi->fix.accel = FB_ACCEL_NONE;
+ fbi->flags |= FBINFO_VIRTFB;
+
fbi->screen_buffer = fbdev->fb_mem;
au1200fb_update_fbinfo(fbi);
diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c
index e956c90efcdc..dcfd1fbbc7e1 100644
--- a/drivers/video/fbdev/clps711x-fb.c
+++ b/drivers/video/fbdev/clps711x-fb.c
@@ -155,13 +155,11 @@ static int clps711x_fb_blank(int blank, struct fb_info *info)
static const struct fb_ops clps711x_fb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_setcolreg = clps711x_fb_setcolreg,
.fb_check_var = clps711x_fb_check_var,
.fb_set_par = clps711x_fb_set_par,
.fb_blank = clps711x_fb_blank,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
};
static int clps711x_lcd_check_fb(struct lcd_device *lcddev, struct fb_info *fi)
diff --git a/drivers/video/fbdev/core/Kconfig b/drivers/video/fbdev/core/Kconfig
index 7a3ed13bed70..21053bf00dc5 100644
--- a/drivers/video/fbdev/core/Kconfig
+++ b/drivers/video/fbdev/core/Kconfig
@@ -4,7 +4,6 @@
#
config FB_CORE
- select FB_IOMEM_FOPS
select VIDEO_CMDLINE
tristate
@@ -129,7 +128,7 @@ config FB_LITTLE_ENDIAN
endchoice
-config FB_SYS_FOPS
+config FB_SYSMEM_FOPS
tristate
depends on FB_CORE
@@ -142,8 +141,8 @@ config FB_DMAMEM_HELPERS
depends on FB_CORE
select FB_SYS_COPYAREA
select FB_SYS_FILLRECT
- select FB_SYS_FOPS
select FB_SYS_IMAGEBLIT
+ select FB_SYSMEM_FOPS
config FB_IOMEM_FOPS
tristate
@@ -168,8 +167,8 @@ config FB_SYSMEM_HELPERS
depends on FB_CORE
select FB_SYS_COPYAREA
select FB_SYS_FILLRECT
- select FB_SYS_FOPS
select FB_SYS_IMAGEBLIT
+ select FB_SYSMEM_FOPS
config FB_SYSMEM_HELPERS_DEFERRED
bool
diff --git a/drivers/video/fbdev/core/Makefile b/drivers/video/fbdev/core/Makefile
index c1d657601b2b..d15974759086 100644
--- a/drivers/video/fbdev/core/Makefile
+++ b/drivers/video/fbdev/core/Makefile
@@ -32,6 +32,6 @@ obj-$(CONFIG_FB_IOMEM_FOPS) += fb_io_fops.o
obj-$(CONFIG_FB_SYS_FILLRECT) += sysfillrect.o
obj-$(CONFIG_FB_SYS_COPYAREA) += syscopyarea.o
obj-$(CONFIG_FB_SYS_IMAGEBLIT) += sysimgblt.o
-obj-$(CONFIG_FB_SYS_FOPS) += fb_sys_fops.o
+obj-$(CONFIG_FB_SYSMEM_FOPS) += fb_sys_fops.o
obj-$(CONFIG_FB_SVGALIB) += svgalib.o
obj-$(CONFIG_FB_DDC) += fb_ddc.o
diff --git a/drivers/video/fbdev/core/cfbcopyarea.c b/drivers/video/fbdev/core/cfbcopyarea.c
index 5b80bf3dae50..a271f57d9c6c 100644
--- a/drivers/video/fbdev/core/cfbcopyarea.c
+++ b/drivers/video/fbdev/core/cfbcopyarea.c
@@ -391,6 +391,9 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
if (p->state != FBINFO_STATE_RUNNING)
return;
+ if (p->flags & FBINFO_VIRTFB)
+ fb_warn_once(p, "Framebuffer is not in I/O address space.");
+
/* if the beginning of the target area might overlap with the end of
the source area, we have to copy the area in reverse. */
if ((dy == sy && dx > sx) || (dy > sy)) {
diff --git a/drivers/video/fbdev/core/cfbfillrect.c b/drivers/video/fbdev/core/cfbfillrect.c
index ba9f58b2a5e8..cbaa4c9e2355 100644
--- a/drivers/video/fbdev/core/cfbfillrect.c
+++ b/drivers/video/fbdev/core/cfbfillrect.c
@@ -287,6 +287,9 @@ void cfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
if (p->state != FBINFO_STATE_RUNNING)
return;
+ if (p->flags & FBINFO_VIRTFB)
+ fb_warn_once(p, "Framebuffer is not in I/O address space.");
+
if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
p->fix.visual == FB_VISUAL_DIRECTCOLOR )
fg = ((u32 *) (p->pseudo_palette))[rect->color];
diff --git a/drivers/video/fbdev/core/cfbimgblt.c b/drivers/video/fbdev/core/cfbimgblt.c
index 9ebda4e0dc7a..7d1d2f1a627d 100644
--- a/drivers/video/fbdev/core/cfbimgblt.c
+++ b/drivers/video/fbdev/core/cfbimgblt.c
@@ -326,6 +326,9 @@ void cfb_imageblit(struct fb_info *p, const struct fb_image *image)
if (p->state != FBINFO_STATE_RUNNING)
return;
+ if (p->flags & FBINFO_VIRTFB)
+ fb_warn_once(p, "Framebuffer is not in I/O address space.");
+
bitstart = (dy * p->fix.line_length * 8) + (dx * bpp);
start_index = bitstart & (32 - 1);
pitch_index = (p->fix.line_length & (bpl - 1)) * 8;
diff --git a/drivers/video/fbdev/core/fb_chrdev.c b/drivers/video/fbdev/core/fb_chrdev.c
index 32a7315b4b6d..4ebd16b7e3b8 100644
--- a/drivers/video/fbdev/core/fb_chrdev.c
+++ b/drivers/video/fbdev/core/fb_chrdev.c
@@ -34,13 +34,13 @@ static ssize_t fb_read(struct file *file, char __user *buf, size_t count, loff_t
if (!info)
return -ENODEV;
+ if (fb_WARN_ON_ONCE(info, !info->fbops->fb_read))
+ return -EINVAL;
+
if (info->state != FBINFO_STATE_RUNNING)
return -EPERM;
- if (info->fbops->fb_read)
- return info->fbops->fb_read(info, buf, count, ppos);
-
- return fb_io_read(info, buf, count, ppos);
+ return info->fbops->fb_read(info, buf, count, ppos);
}
static ssize_t fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
@@ -50,13 +50,13 @@ static ssize_t fb_write(struct file *file, const char __user *buf, size_t count,
if (!info)
return -ENODEV;
+ if (fb_WARN_ON_ONCE(info, !info->fbops->fb_write))
+ return -EINVAL;
+
if (info->state != FBINFO_STATE_RUNNING)
return -EPERM;
- if (info->fbops->fb_write)
- return info->fbops->fb_write(info, buf, count, ppos);
-
- return fb_io_write(info, buf, count, ppos);
+ return info->fbops->fb_write(info, buf, count, ppos);
}
static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
@@ -314,61 +314,19 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd,
static int fb_mmap(struct file *file, struct vm_area_struct *vma)
{
struct fb_info *info = file_fb_info(file);
- unsigned long mmio_pgoff;
- unsigned long start;
- u32 len;
+ int res;
if (!info)
return -ENODEV;
- mutex_lock(&info->mm_lock);
- if (info->fbops->fb_mmap) {
- int res;
-
- /*
- * The framebuffer needs to be accessed decrypted, be sure
- * SME protection is removed ahead of the call
- */
- vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
- res = info->fbops->fb_mmap(info, vma);
- mutex_unlock(&info->mm_lock);
- return res;
-#if IS_ENABLED(CONFIG_FB_DEFERRED_IO)
- } else if (info->fbdefio) {
- /*
- * FB deferred I/O wants you to handle mmap in your drivers. At a
- * minimum, point struct fb_ops.fb_mmap to fb_deferred_io_mmap().
- */
- dev_warn_once(info->dev, "fbdev mmap not set up for deferred I/O.\n");
- mutex_unlock(&info->mm_lock);
+ if (fb_WARN_ON_ONCE(info, !info->fbops->fb_mmap))
return -ENODEV;
-#endif
- }
-
- /*
- * Ugh. This can be either the frame buffer mapping, or
- * if pgoff points past it, the mmio mapping.
- */
- start = info->fix.smem_start;
- len = info->fix.smem_len;
- mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
- if (vma->vm_pgoff >= mmio_pgoff) {
- if (info->var.accel_flags) {
- mutex_unlock(&info->mm_lock);
- return -EINVAL;
- }
- vma->vm_pgoff -= mmio_pgoff;
- start = info->fix.mmio_start;
- len = info->fix.mmio_len;
- }
+ mutex_lock(&info->mm_lock);
+ res = info->fbops->fb_mmap(info, vma);
mutex_unlock(&info->mm_lock);
- vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- vma->vm_page_prot = pgprot_framebuffer(vma->vm_page_prot, vma->vm_start,
- vma->vm_end, start);
-
- return vm_iomap_memory(vma, start, len);
+ return res;
}
static int fb_open(struct inode *inode, struct file *file)
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 274f5d0fa247..1b0b85e59e5e 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -227,6 +227,8 @@ static const struct address_space_operations fb_deferred_io_aops = {
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
vma->vm_ops = &fb_deferred_io_vm_ops;
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
if (!(info->flags & FBINFO_VIRTFB))
diff --git a/drivers/video/fbdev/core/fb_io_fops.c b/drivers/video/fbdev/core/fb_io_fops.c
index 871b829521af..3408ff1b2b7a 100644
--- a/drivers/video/fbdev/core/fb_io_fops.c
+++ b/drivers/video/fbdev/core/fb_io_fops.c
@@ -12,6 +12,9 @@ ssize_t fb_io_read(struct fb_info *info, char __user *buf, size_t count, loff_t
int c, cnt = 0, err = 0;
unsigned long total_size, trailing;
+ if (info->flags & FBINFO_VIRTFB)
+ fb_warn_once(info, "Framebuffer is not in I/O address space.");
+
if (!info->screen_base)
return -ENODEV;
@@ -73,6 +76,9 @@ ssize_t fb_io_write(struct fb_info *info, const char __user *buf, size_t count,
int c, cnt = 0, err = 0;
unsigned long total_size, trailing;
+ if (info->flags & FBINFO_VIRTFB)
+ fb_warn_once(info, "Framebuffer is not in I/O address space.");
+
if (!info->screen_base)
return -ENODEV;
@@ -132,5 +138,35 @@ ssize_t fb_io_write(struct fb_info *info, const char __user *buf, size_t count,
}
EXPORT_SYMBOL(fb_io_write);
+int fb_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ unsigned long start = info->fix.smem_start;
+ u32 len = info->fix.smem_len;
+ unsigned long mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
+
+ if (info->flags & FBINFO_VIRTFB)
+ fb_warn_once(info, "Framebuffer is not in I/O address space.");
+
+ /*
+ * This can be either the framebuffer mapping, or if pgoff points
+ * past it, the mmio mapping.
+ */
+ if (vma->vm_pgoff >= mmio_pgoff) {
+ if (info->var.accel_flags)
+ return -EINVAL;
+
+ vma->vm_pgoff -= mmio_pgoff;
+ start = info->fix.mmio_start;
+ len = info->fix.mmio_len;
+ }
+
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ vma->vm_page_prot = pgprot_framebuffer(vma->vm_page_prot, vma->vm_start,
+ vma->vm_end, start);
+
+ return vm_iomap_memory(vma, start, len);
+}
+EXPORT_SYMBOL(fb_io_mmap);
+
MODULE_DESCRIPTION("Fbdev helpers for framebuffers in I/O memory");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/core/fb_sys_fops.c b/drivers/video/fbdev/core/fb_sys_fops.c
index 0cb0989abda6..a9aa6519a5b3 100644
--- a/drivers/video/fbdev/core/fb_sys_fops.c
+++ b/drivers/video/fbdev/core/fb_sys_fops.c
@@ -22,6 +22,9 @@ ssize_t fb_sys_read(struct fb_info *info, char __user *buf, size_t count,
unsigned long total_size, c;
ssize_t ret;
+ if (!(info->flags & FBINFO_VIRTFB))
+ fb_warn_once(info, "Framebuffer is not in virtual address space.");
+
if (!info->screen_buffer)
return -ENODEV;
@@ -64,6 +67,9 @@ ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
unsigned long total_size, c;
size_t ret;
+ if (!(info->flags & FBINFO_VIRTFB))
+ fb_warn_once(info, "Framebuffer is not in virtual address space.");
+
if (!info->screen_buffer)
return -ENODEV;
diff --git a/drivers/video/fbdev/core/syscopyarea.c b/drivers/video/fbdev/core/syscopyarea.c
index 7b8bd3a2bedc..75e7001e8450 100644
--- a/drivers/video/fbdev/core/syscopyarea.c
+++ b/drivers/video/fbdev/core/syscopyarea.c
@@ -324,6 +324,9 @@ void sys_copyarea(struct fb_info *p, const struct fb_copyarea *area)
if (p->state != FBINFO_STATE_RUNNING)
return;
+ if (!(p->flags & FBINFO_VIRTFB))
+ fb_warn_once(p, "Framebuffer is not in virtual address space.");
+
/* if the beginning of the target area might overlap with the end of
the source area, we have to copy the area in reverse. */
if ((dy == sy && dx > sx) || (dy > sy)) {
diff --git a/drivers/video/fbdev/core/sysfillrect.c b/drivers/video/fbdev/core/sysfillrect.c
index bcdcaeae6538..e49221a88ccc 100644
--- a/drivers/video/fbdev/core/sysfillrect.c
+++ b/drivers/video/fbdev/core/sysfillrect.c
@@ -242,6 +242,9 @@ void sys_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
if (p->state != FBINFO_STATE_RUNNING)
return;
+ if (!(p->flags & FBINFO_VIRTFB))
+ fb_warn_once(p, "Framebuffer is not in virtual address space.");
+
if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
p->fix.visual == FB_VISUAL_DIRECTCOLOR )
fg = ((u32 *) (p->pseudo_palette))[rect->color];
diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c
index 665ef7a0a249..6949bbd51d92 100644
--- a/drivers/video/fbdev/core/sysimgblt.c
+++ b/drivers/video/fbdev/core/sysimgblt.c
@@ -296,6 +296,9 @@ void sys_imageblit(struct fb_info *p, const struct fb_image *image)
if (p->state != FBINFO_STATE_RUNNING)
return;
+ if (!(p->flags & FBINFO_VIRTFB))
+ fb_warn_once(p, "Framebuffer is not in virtual address space.");
+
bitstart = (dy * p->fix.line_length * 8) + (dx * bpp);
start_index = bitstart & (32 - 1);
pitch_index = (p->fix.line_length & (bpl - 1)) * 8;
diff --git a/drivers/video/fbdev/cyber2000fb.c b/drivers/video/fbdev/cyber2000fb.c
index 52105dc1a72f..abb87d3576db 100644
--- a/drivers/video/fbdev/cyber2000fb.c
+++ b/drivers/video/fbdev/cyber2000fb.c
@@ -227,13 +227,6 @@ cyber2000fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
CO_REG_CMD_H, cfb);
}
-static void
-cyber2000fb_imageblit(struct fb_info *info, const struct fb_image *image)
-{
- cfb_imageblit(info, image);
- return;
-}
-
static int cyber2000fb_sync(struct fb_info *info)
{
struct cfb_info *cfb = container_of(info, struct cfb_info, fb);
@@ -1069,7 +1062,7 @@ static const struct fb_ops cyber2000fb_ops = {
.fb_pan_display = cyber2000fb_pan_display,
.fb_fillrect = cyber2000fb_fillrect,
.fb_copyarea = cyber2000fb_copyarea,
- .fb_imageblit = cyber2000fb_imageblit,
+ .fb_imageblit = cfb_imageblit,
.fb_sync = cyber2000fb_sync,
__FB_DEFAULT_IOMEM_OPS_MMAP,
};
diff --git a/drivers/video/fbdev/ep93xx-fb.c b/drivers/video/fbdev/ep93xx-fb.c
index cae00deee001..3e378874ccc7 100644
--- a/drivers/video/fbdev/ep93xx-fb.c
+++ b/drivers/video/fbdev/ep93xx-fb.c
@@ -311,6 +311,8 @@ static int ep93xxfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
unsigned int offset = vma->vm_pgoff << PAGE_SHIFT;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
if (offset < info->fix.smem_len) {
return dma_mmap_wc(info->device, vma, info->screen_base,
info->fix.smem_start, info->fix.smem_len);
diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c
index e89e5579258e..8463de833d1e 100644
--- a/drivers/video/fbdev/gbefb.c
+++ b/drivers/video/fbdev/gbefb.c
@@ -1000,6 +1000,8 @@ static int gbefb_mmap(struct fb_info *info,
unsigned long phys_addr, phys_size;
u16 *tile;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
/* check range */
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
return -EINVAL;
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 694cf6318782..aa31c0d26e92 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -1203,6 +1203,8 @@ static int omapfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
struct omapfb_device *fbdev = plane->fbdev;
int r;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
omapfb_rqueue_lock(fbdev);
r = fbdev->ctrl->mmap(info, vma);
omapfb_rqueue_unlock(fbdev);
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index c9fd0ad352d7..0db9c55fce5a 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -1095,6 +1095,8 @@ static int omapfb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
u32 len;
int r;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
rg = omapfb_get_mem_region(ofbi->region);
start = omapfb_get_region_paddr(ofbi);
diff --git a/drivers/video/fbdev/ps3fb.c b/drivers/video/fbdev/ps3fb.c
index 64d291d6b153..dbcda307f6a6 100644
--- a/drivers/video/fbdev/ps3fb.c
+++ b/drivers/video/fbdev/ps3fb.c
@@ -708,6 +708,8 @@ static int ps3fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
int r;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
r = vm_iomap_memory(vma, info->fix.smem_start, info->fix.smem_len);
dev_dbg(info->device, "ps3fb: mmap framebuffer P(%lx)->V(%lx)\n",
@@ -939,15 +941,12 @@ static const struct fb_ops ps3fb_ops = {
.owner = THIS_MODULE,
.fb_open = ps3fb_open,
.fb_release = ps3fb_release,
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
+ __FB_DEFAULT_SYSMEM_OPS_RDWR,
.fb_check_var = ps3fb_check_var,
.fb_set_par = ps3fb_set_par,
.fb_setcolreg = ps3fb_setcolreg,
.fb_pan_display = ps3fb_pan_display,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
+ __FB_DEFAULT_SYSMEM_OPS_DRAW,
.fb_mmap = ps3fb_mmap,
.fb_blank = ps3fb_blank,
.fb_ioctl = ps3fb_ioctl,
@@ -1145,7 +1144,7 @@ static int ps3fb_probe(struct ps3_system_bus_device *dev)
info->fix.smem_len = ps3fb_videomemory.size - GPU_FB_START;
info->pseudo_palette = par->pseudo_palette;
- info->flags = FBINFO_READS_FAST |
+ info->flags = FBINFO_VIRTFB | FBINFO_READS_FAST |
FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
retval = fb_alloc_cmap(&info->cmap, 256, 0);
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index befd3fe2f659..0d362d2bf0e3 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -562,6 +562,8 @@ static int sa1100fb_mmap(struct fb_info *info,
container_of(info, struct sa1100fb_info, fb);
unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
if (off < info->fix.smem_len) {
vma->vm_pgoff += 1; /* skip over the palette */
return dma_mmap_wc(fbi->dev, vma, fbi->map_cpu, fbi->map_dma,
diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c
index 21e9fd8e69e2..634e3d159452 100644
--- a/drivers/video/fbdev/sbuslib.c
+++ b/drivers/video/fbdev/sbuslib.c
@@ -48,7 +48,7 @@ int sbusfb_mmap_helper(struct sbus_mmap_map *map,
unsigned long map_offset = 0;
unsigned long off;
int i;
-
+
if (!(vma->vm_flags & (VM_SHARED | VM_MAYSHARE)))
return -EINVAL;
@@ -60,6 +60,7 @@ int sbusfb_mmap_helper(struct sbus_mmap_map *map,
/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
/* Each page, see which map applies */
@@ -72,7 +73,7 @@ int sbusfb_mmap_helper(struct sbus_mmap_map *map,
#define POFF_MASK (PAGE_MASK|0x1UL)
#else
#define POFF_MASK (PAGE_MASK)
-#endif
+#endif
map_offset = (physbase + map[i].poff) & POFF_MASK;
break;
}
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index 1364dafaadb1..eb2297b37504 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -1482,19 +1482,18 @@ sh_mobile_lcdc_overlay_mmap(struct fb_info *info, struct vm_area_struct *vma)
if (info->fbdefio)
return fb_deferred_io_mmap(info, vma);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
return dma_mmap_coherent(ovl->channel->lcdc->dev, vma, ovl->fb_mem,
ovl->dma_handle, ovl->fb_size);
}
static const struct fb_ops sh_mobile_lcdc_overlay_ops = {
.owner = THIS_MODULE,
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
+ __FB_DEFAULT_DMAMEM_OPS_RDWR,
.fb_blank = sh_mobile_lcdc_overlay_blank,
.fb_pan_display = sh_mobile_lcdc_overlay_pan,
+ __FB_DEFAULT_DMAMEM_OPS_DRAW,
.fb_ioctl = sh_mobile_lcdc_overlay_ioctl,
.fb_check_var = sh_mobile_lcdc_overlay_check_var,
.fb_set_par = sh_mobile_lcdc_overlay_set_par,
@@ -1567,6 +1566,7 @@ sh_mobile_lcdc_overlay_fb_init(struct sh_mobile_lcdc_overlay *ovl)
info->fbops = &sh_mobile_lcdc_overlay_ops;
info->device = priv->dev;
+ info->flags |= FBINFO_VIRTFB;
info->screen_buffer = ovl->fb_mem;
info->par = ovl;
@@ -1958,6 +1958,8 @@ sh_mobile_lcdc_mmap(struct fb_info *info, struct vm_area_struct *vma)
if (info->fbdefio)
return fb_deferred_io_mmap(info, vma);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
return dma_mmap_coherent(ch->lcdc->dev, vma, ch->fb_mem,
ch->dma_handle, ch->fb_size);
}
@@ -1965,8 +1967,7 @@ sh_mobile_lcdc_mmap(struct fb_info *info, struct vm_area_struct *vma)
static const struct fb_ops sh_mobile_lcdc_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = sh_mobile_lcdc_setcolreg,
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
+ __FB_DEFAULT_DMAMEM_OPS_RDWR,
.fb_fillrect = sh_mobile_lcdc_fillrect,
.fb_copyarea = sh_mobile_lcdc_copyarea,
.fb_imageblit = sh_mobile_lcdc_imageblit,
@@ -2053,6 +2054,7 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch,
info->fbops = &sh_mobile_lcdc_ops;
info->device = priv->dev;
+ info->flags |= FBINFO_VIRTFB;
info->screen_buffer = ch->fb_mem;
info->pseudo_palette = &ch->pseudo_palette;
info->par = ch;
diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
index 3f8ef50e3209..104f122e0f27 100644
--- a/drivers/video/fbdev/sm712fb.c
+++ b/drivers/video/fbdev/sm712fb.c
@@ -1347,16 +1347,14 @@ static int smtc_set_par(struct fb_info *info)
static const struct fb_ops smtcfb_ops = {
.owner = THIS_MODULE,
- FB_DEFAULT_IOMEM_OPS,
.fb_check_var = smtc_check_var,
.fb_set_par = smtc_set_par,
.fb_setcolreg = smtc_setcolreg,
.fb_blank = smtc_blank,
- .fb_fillrect = cfb_fillrect,
- .fb_imageblit = cfb_imageblit,
- .fb_copyarea = cfb_copyarea,
+ __FB_DEFAULT_IOMEM_OPS_DRAW,
.fb_read = smtcfb_read,
.fb_write = smtcfb_write,
+ __FB_DEFAULT_IOMEM_OPS_MMAP,
};
/*
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
index 90a77d19b236..35d682b110c4 100644
--- a/drivers/video/fbdev/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
@@ -783,6 +783,8 @@ static int ufx_ops_mmap(struct fb_info *info, struct vm_area_struct *vma)
if (info->fbdefio)
return fb_deferred_io_mmap(info, vma);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
return -EINVAL;
if (size > info->fix.smem_len)
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 2460ff4ac86b..1514ddac4caf 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -331,6 +331,8 @@ static int dlfb_ops_mmap(struct fb_info *info, struct vm_area_struct *vma)
if (info->fbdefio)
return fb_deferred_io_mmap(info, vma);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
return -EINVAL;
if (size > info->fix.smem_len)
diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c
index 840ead69654b..a087b42ca652 100644
--- a/drivers/video/fbdev/vermilion/vermilion.c
+++ b/drivers/video/fbdev/vermilion/vermilion.c
@@ -998,6 +998,8 @@ static int vmlfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
int ret;
unsigned long prot;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
ret = vmlfb_vram_offset(vinfo, offset);
if (ret)
return -EINVAL;
diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c
index 1b7c338f9956..f86149ba3835 100644
--- a/drivers/video/fbdev/vfb.c
+++ b/drivers/video/fbdev/vfb.c
@@ -80,15 +80,12 @@ static int vfb_mmap(struct fb_info *info,
static const struct fb_ops vfb_ops = {
.owner = THIS_MODULE,
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
+ __FB_DEFAULT_SYSMEM_OPS_RDWR,
.fb_check_var = vfb_check_var,
.fb_set_par = vfb_set_par,
.fb_setcolreg = vfb_setcolreg,
.fb_pan_display = vfb_pan_display,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
+ __FB_DEFAULT_SYSMEM_OPS_DRAW,
.fb_mmap = vfb_mmap,
};
@@ -385,6 +382,8 @@ static int vfb_pan_display(struct fb_var_screeninfo *var,
static int vfb_mmap(struct fb_info *info,
struct vm_area_struct *vma)
{
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
return remap_vmalloc_range(vma, (void *)info->fix.smem_start, vma->vm_pgoff);
}
@@ -440,6 +439,7 @@ static int vfb_probe(struct platform_device *dev)
if (!info)
goto err;
+ info->flags |= FBINFO_VIRTFB;
info->screen_buffer = videomemory;
info->fbops = &vfb_ops;
diff --git a/drivers/video/fbdev/vt8500lcdfb.c b/drivers/video/fbdev/vt8500lcdfb.c
index 42d39a9d5130..42c25dc85197 100644
--- a/drivers/video/fbdev/vt8500lcdfb.c
+++ b/drivers/video/fbdev/vt8500lcdfb.c
@@ -241,6 +241,7 @@ static int vt8500lcd_blank(int blank, struct fb_info *info)
static const struct fb_ops vt8500lcd_ops = {
.owner = THIS_MODULE,
+ __FB_DEFAULT_DMAMEM_OPS_RDWR,
.fb_set_par = vt8500lcd_set_par,
.fb_setcolreg = vt8500lcd_setcolreg,
.fb_fillrect = wmt_ge_fillrect,
@@ -250,6 +251,7 @@ static const struct fb_ops vt8500lcd_ops = {
.fb_ioctl = vt8500lcd_ioctl,
.fb_pan_display = vt8500lcd_pan_display,
.fb_blank = vt8500lcd_blank,
+ // .fb_mmap needs DMA mmap
};
static irqreturn_t vt8500lcd_handle_irq(int irq, void *dev_id)
@@ -357,7 +359,7 @@ static int vt8500lcd_probe(struct platform_device *pdev)
fbi->fb.fix.smem_start = fb_mem_phys;
fbi->fb.fix.smem_len = fb_mem_len;
- fbi->fb.screen_base = fb_mem_virt;
+ fbi->fb.screen_buffer = fb_mem_virt;
fbi->palette_size = PAGE_ALIGN(512);
fbi->palette_cpu = dma_alloc_coherent(&pdev->dev,
diff --git a/drivers/video/fbdev/wm8505fb.c b/drivers/video/fbdev/wm8505fb.c
index 5833147aa43d..00952e9c8802 100644
--- a/drivers/video/fbdev/wm8505fb.c
+++ b/drivers/video/fbdev/wm8505fb.c
@@ -248,6 +248,7 @@ static int wm8505fb_blank(int blank, struct fb_info *info)
static const struct fb_ops wm8505fb_ops = {
.owner = THIS_MODULE,
+ __FB_DEFAULT_DMAMEM_OPS_RDWR,
.fb_set_par = wm8505fb_set_par,
.fb_setcolreg = wm8505fb_setcolreg,
.fb_fillrect = wmt_ge_fillrect,
@@ -256,6 +257,7 @@ static const struct fb_ops wm8505fb_ops = {
.fb_sync = wmt_ge_sync,
.fb_pan_display = wm8505fb_pan_display,
.fb_blank = wm8505fb_blank,
+ __FB_DEFAULT_IOMEM_OPS_MMAP,
};
static int wm8505fb_probe(struct platform_device *pdev)
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index f5edb9e27e3c..b8cfea7812d6 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1110,8 +1110,10 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
for (i = 0; i < nvec; i++) {
info = xen_irq_init(irq + i);
- if (!info)
+ if (!info) {
+ ret = -ENOMEM;
goto error_irq;
+ }
irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);
diff --git a/fs/Kconfig b/fs/Kconfig
index fd1f655b4f1f..42837617a55b 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -268,6 +268,7 @@ config HUGETLBFS
config HUGETLB_PAGE
def_bool HUGETLBFS
+ select XARRAY_MULTI
config HUGETLB_PAGE_OPTIMIZE_VMEMMAP
def_bool HUGETLB_PAGE
diff --git a/fs/bcachefs/Kconfig b/fs/bcachefs/Kconfig
index c08c2c7d6fbb..fddc7be58022 100644
--- a/fs/bcachefs/Kconfig
+++ b/fs/bcachefs/Kconfig
@@ -33,6 +33,18 @@ config BCACHEFS_QUOTA
depends on BCACHEFS_FS
select QUOTACTL
+config BCACHEFS_ERASURE_CODING
+ bool "bcachefs erasure coding (RAID5/6) support (EXPERIMENTAL)"
+ depends on BCACHEFS_FS
+ select QUOTACTL
+ help
+ This enables the "erasure_code" filesystem and inode option, which
+ organizes data into Reed-Solomon stripes instead of ordinary
+ replication.
+
+ WARNING: this feature is still undergoing on-disk format changes, and
+ should only be enabled for testing purposes.
+
config BCACHEFS_POSIX_ACL
bool "bcachefs POSIX ACL support"
depends on BCACHEFS_FS
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index b85c7765272f..1ba0eeb7552a 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -1297,6 +1297,30 @@ out:
return wp;
}
+static noinline void
+deallocate_extra_replicas(struct bch_fs *c,
+ struct open_buckets *ptrs,
+ struct open_buckets *ptrs_no_use,
+ unsigned extra_replicas)
+{
+ struct open_buckets ptrs2 = { 0 };
+ struct open_bucket *ob;
+ unsigned i;
+
+ open_bucket_for_each(c, ptrs, ob, i) {
+ unsigned d = bch_dev_bkey_exists(c, ob->dev)->mi.durability;
+
+ if (d && d <= extra_replicas) {
+ extra_replicas -= d;
+ ob_push(c, ptrs_no_use, ob);
+ } else {
+ ob_push(c, &ptrs2, ob);
+ }
+ }
+
+ *ptrs = ptrs2;
+}
+
/*
* Get us an open_bucket we can allocate from, return with it locked:
*/
@@ -1321,6 +1345,9 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
int ret;
int i;
+ if (!IS_ENABLED(CONFIG_BCACHEFS_ERASURE_CODING))
+ erasure_code = false;
+
BUG_ON(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS);
BUG_ON(!nr_replicas || !nr_replicas_required);
@@ -1382,6 +1409,9 @@ alloc_done:
if (ret)
goto err;
+ if (nr_effective > nr_replicas)
+ deallocate_extra_replicas(c, &ptrs, &wp->ptrs, nr_effective - nr_replicas);
+
/* Free buckets we didn't use: */
open_bucket_for_each(c, &wp->ptrs, ob, i)
open_bucket_free_unused(c, ob);
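To make the deallocate_extra_replicas() logic above easier to follow, here is a standalone model of the same greedy filter; plain arrays stand in for struct open_buckets and all names are hypothetical:

/* Walk the allocated buckets; any bucket whose durability fits wholly
 * inside the remaining surplus is released, the rest are kept. Returns
 * the surplus that could not be shed exactly. */
static unsigned shed_extra_durability(const unsigned *durability,
				      bool *keep, unsigned nr, unsigned extra)
{
	for (unsigned i = 0; i < nr; i++) {
		if (durability[i] && durability[i] <= extra) {
			extra -= durability[i];
			keep[i] = false;	/* goes back to the free list */
		} else {
			keep[i] = true;
		}
	}
	return extra;
}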
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 403aa3389fcc..dfa22f9d9a1d 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -638,6 +638,8 @@ struct journal_keys {
size_t gap;
size_t nr;
size_t size;
+ atomic_t ref;
+ bool initial_ref_held;
};
struct btree_trans_buf {
@@ -929,7 +931,7 @@ struct bch_fs {
mempool_t compression_bounce[2];
mempool_t compress_workspace[BCH_COMPRESSION_TYPE_NR];
mempool_t decompress_workspace;
- ZSTD_parameters zstd_params;
+ size_t zstd_workspace_size;
struct crypto_shash *sha256;
struct crypto_sync_skcipher *chacha20;
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index 0a750953ff92..1ab1f08d763b 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -151,7 +151,11 @@ struct bpos {
#else
#error edit for your odd byteorder.
#endif
-} __packed __aligned(4);
+} __packed
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+__aligned(4)
+#endif
+;
#define KEY_INODE_MAX ((__u64)~0ULL)
#define KEY_OFFSET_MAX ((__u64)~0ULL)
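The attribute change above keeps struct bpos packed everywhere but only forces 4-byte alignment on little-endian builds; the big-endian variant drops it. A build-time check along these lines could pin the resulting layout down (hypothetical assertions, not part of the patch, assuming the three-field 8 + 8 + 4 layout in this header):

_Static_assert(sizeof(struct bpos) == 20, "bpos is 20 packed bytes");
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
_Static_assert(_Alignof(struct bpos) == 4, "LE key packing assumes 4-byte alignment");
#endif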
@@ -1528,7 +1532,7 @@ struct bch_sb_field_disk_groups {
x(move_extent_write, 36) \
x(move_extent_finish, 37) \
x(move_extent_fail, 38) \
- x(move_extent_alloc_mem_fail, 39) \
+ x(move_extent_start_fail, 39) \
x(copygc, 40) \
x(copygc_wait, 41) \
x(gc_gens_end, 42) \
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 0b5d09c8475d..30ab78a24517 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -1541,8 +1541,8 @@ static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
rcu_assign_pointer(ca->buckets_gc, buckets);
}
- for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
- BTREE_ITER_PREFETCH, k, ret) {
+ ret = for_each_btree_key2(trans, iter, BTREE_ID_alloc, POS_MIN,
+ BTREE_ITER_PREFETCH, k, ({
ca = bch_dev_bkey_exists(c, k.k->p.inode);
g = gc_bucket(ca, k.k->p.offset);
@@ -1561,8 +1561,9 @@ static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
g->stripe = a->stripe;
g->stripe_redundancy = a->stripe_redundancy;
}
- }
- bch2_trans_iter_exit(trans, &iter);
+
+ 0;
+ }));
err:
bch2_trans_put(trans);
if (ret)
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 37d896edb06e..57c20390e10e 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -1358,10 +1358,9 @@ static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *
return offset;
}
-static void btree_node_read_all_replicas_done(struct closure *cl)
+static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
{
- struct btree_node_read_all *ra =
- container_of(cl, struct btree_node_read_all, cl);
+ closure_type(ra, struct btree_node_read_all, cl);
struct bch_fs *c = ra->c;
struct btree *b = ra->b;
struct printbuf buf = PRINTBUF;
@@ -1567,7 +1566,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
if (sync) {
closure_sync(&ra->cl);
- btree_node_read_all_replicas_done(&ra->cl);
+ btree_node_read_all_replicas_done(&ra->cl.work);
} else {
continue_at(&ra->cl, btree_node_read_all_replicas_done,
c->io_complete_wq);
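This is the first of many conversions in this series to the new closure-callback helpers. As I read <linux/closure.h>, the two macros amount to roughly the following, so callbacks now take the embedded work_struct and recover their container in one step (the example type is hypothetical):

/* Paraphrase of the helpers: */
#define CLOSURE_CALLBACK(name)	void name(struct work_struct *ws)
#define closure_type(name, type, member)				\
	struct closure *cl = container_of(ws, struct closure, work);	\
	type *name = container_of(cl, type, member)

struct example_op {
	struct closure		cl;
	struct completion	done;
};

static CLOSURE_CALLBACK(example_op_done)
{
	closure_type(op, struct example_op, cl);

	complete(&op->done);
}

That is also why direct, synchronous invocations in this series now pass &x->cl.work rather than &x->cl.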
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 6fa90bcd7016..8e0fe65f6101 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -2981,7 +2981,8 @@ struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
trans->fn_idx = fn_idx;
trans->locking_wait.task = current;
trans->journal_replay_not_finished =
- !test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags);
+ unlikely(!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)) &&
+ atomic_inc_not_zero(&c->journal_keys.ref);
closure_init_stack(&trans->ref);
s = btree_trans_stats(trans);
@@ -3098,6 +3099,9 @@ void bch2_trans_put(struct btree_trans *trans)
kfree(trans->fs_usage_deltas);
}
+ if (unlikely(trans->journal_replay_not_finished))
+ bch2_journal_keys_put(c);
+
if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
mempool_free(trans->mem, &c->btree_trans_mem_pool);
else
diff --git a/fs/bcachefs/btree_journal_iter.c b/fs/bcachefs/btree_journal_iter.c
index 58a981bcf3aa..ec52f50d249d 100644
--- a/fs/bcachefs/btree_journal_iter.c
+++ b/fs/bcachefs/btree_journal_iter.c
@@ -80,6 +80,8 @@ struct bkey_i *bch2_journal_keys_peek_upto(struct bch_fs *c, enum btree_id btree
struct journal_keys *keys = &c->journal_keys;
unsigned iters = 0;
struct journal_key *k;
+
+ BUG_ON(*idx > keys->nr);
search:
if (!*idx)
*idx = __bch2_journal_key_search(keys, btree_id, level, pos);
@@ -189,10 +191,12 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
/* Since @keys was full, there was no gap: */
memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
kvfree(keys->d);
- *keys = new_keys;
+ keys->d = new_keys.d;
+ keys->nr = new_keys.nr;
+ keys->size = new_keys.size;
/* And now the gap is at the end: */
- keys->gap = keys->nr;
+ keys->gap = keys->nr;
}
journal_iters_move_gap(c, keys->gap, idx);
@@ -415,10 +419,16 @@ static int journal_sort_key_cmp(const void *_l, const void *_r)
cmp_int(l->journal_offset, r->journal_offset);
}
-void bch2_journal_keys_free(struct journal_keys *keys)
+void bch2_journal_keys_put(struct bch_fs *c)
{
+ struct journal_keys *keys = &c->journal_keys;
struct journal_key *i;
+ BUG_ON(atomic_read(&keys->ref) <= 0);
+
+ if (!atomic_dec_and_test(&keys->ref))
+ return;
+
move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
keys->gap = keys->nr;
@@ -429,6 +439,8 @@ void bch2_journal_keys_free(struct journal_keys *keys)
kvfree(keys->d);
keys->d = NULL;
keys->nr = keys->gap = keys->size = 0;
+
+ bch2_journal_entries_free(c);
}
static void __journal_keys_sort(struct journal_keys *keys)
diff --git a/fs/bcachefs/btree_journal_iter.h b/fs/bcachefs/btree_journal_iter.h
index 5d64e7e22f26..8ca4c100b2e3 100644
--- a/fs/bcachefs/btree_journal_iter.h
+++ b/fs/bcachefs/btree_journal_iter.h
@@ -49,7 +49,15 @@ void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *,
struct bch_fs *,
struct btree *);
-void bch2_journal_keys_free(struct journal_keys *);
+void bch2_journal_keys_put(struct bch_fs *);
+
+static inline void bch2_journal_keys_put_initial(struct bch_fs *c)
+{
+ if (c->journal_keys.initial_ref_held)
+ bch2_journal_keys_put(c);
+ c->journal_keys.initial_ref_held = false;
+}
+
void bch2_journal_entries_free(struct bch_fs *);
int bch2_journal_keys_sort(struct bch_fs *);
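The new reference count gives the journal keys a clear lifetime rule: a transaction pins them only while replay is still in flight, and the final put tears everything down. A condensed sketch of the pairing, which drops the gap-buffer bookkeeping the real code does:

static bool journal_keys_get(struct bch_fs *c)
{
	return !test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags) &&
	       atomic_inc_not_zero(&c->journal_keys.ref);
}

static void journal_keys_put(struct bch_fs *c)
{
	if (!atomic_dec_and_test(&c->journal_keys.ref))
		return;

	kvfree(c->journal_keys.d);		/* last reference frees */
	c->journal_keys.d = NULL;
	bch2_journal_entries_free(c);
}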
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 76f27bc9fa24..6697417273aa 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -778,9 +778,9 @@ static void btree_interior_update_work(struct work_struct *work)
}
}
-static void btree_update_set_nodes_written(struct closure *cl)
+static CLOSURE_CALLBACK(btree_update_set_nodes_written)
{
- struct btree_update *as = container_of(cl, struct btree_update, cl);
+ closure_type(as, struct btree_update, cl);
struct bch_fs *c = as->c;
mutex_lock(&c->btree_interior_update_lock);
@@ -1071,8 +1071,12 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
break;
}
+ /*
+ * Always check for space for two keys, even if we won't have to
+ * split at the prior level - it might have been a merge instead:
+ */
if (bch2_btree_node_insert_fits(c, path->l[update_level].b,
- BKEY_BTREE_PTR_U64s_MAX * (1 + split)))
+ BKEY_BTREE_PTR_U64s_MAX * 2))
break;
split = path->l[update_level].b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c);
@@ -2266,6 +2270,10 @@ int bch2_btree_node_update_key_get_iter(struct btree_trans *trans,
BUG_ON(!btree_node_hashed(b));
+ struct bch_extent_ptr *ptr;
+ bch2_bkey_drop_ptrs(bkey_i_to_s(new_key), ptr,
+ !bch2_bkey_has_device(bkey_i_to_s(&b->key), ptr->dev));
+
ret = bch2_btree_node_update_key(trans, &iter, b, new_key,
commit_flags, skip_triggers);
out:
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 58d8c6ffd955..5a91d3189fcf 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -854,8 +854,12 @@ static int __mark_pointer(struct btree_trans *trans,
return ret;
*dst_sectors += sectors;
- *bucket_data_type = *dirty_sectors || *cached_sectors
- ? ptr_data_type : 0;
+
+ if (!*dirty_sectors && !*cached_sectors)
+ *bucket_data_type = 0;
+ else if (*bucket_data_type != BCH_DATA_stripe)
+ *bucket_data_type = ptr_data_type;
+
return 0;
}
@@ -2091,8 +2095,6 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
bucket_gens->first_bucket = ca->mi.first_bucket;
bucket_gens->nbuckets = nbuckets;
- bch2_copygc_stop(c);
-
if (resize) {
down_write(&c->gc_lock);
down_write(&ca->bucket_lock);
diff --git a/fs/bcachefs/compress.c b/fs/bcachefs/compress.c
index a8b148ec2a2b..51af8ea230ed 100644
--- a/fs/bcachefs/compress.c
+++ b/fs/bcachefs/compress.c
@@ -354,8 +354,7 @@ static int attempt_compress(struct bch_fs *c,
*/
unsigned level = min((compression.level * 3) / 2, zstd_max_clevel());
ZSTD_parameters params = zstd_get_params(level, c->opts.encoded_extent_max);
- ZSTD_CCtx *ctx = zstd_init_cctx(workspace,
- zstd_cctx_workspace_bound(&params.cParams));
+ ZSTD_CCtx *ctx = zstd_init_cctx(workspace, c->zstd_workspace_size);
/*
* ZSTD requires that when we decompress we pass in the exact
@@ -371,7 +370,7 @@ static int attempt_compress(struct bch_fs *c,
size_t len = zstd_compress_cctx(ctx,
dst + 4, dst_len - 4 - 7,
src, src_len,
- &c->zstd_params);
+ &params);
if (zstd_is_error(len))
return 0;
@@ -572,6 +571,13 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
size_t decompress_workspace_size = 0;
ZSTD_parameters params = zstd_get_params(zstd_max_clevel(),
c->opts.encoded_extent_max);
+
+ /*
+ * ZSTD is lying: if we allocate only the workspace size it says it
+ * requires, it returns memory allocation errors
+ */
+ c->zstd_workspace_size = zstd_cctx_workspace_bound(&params.cParams);
+
struct {
unsigned feature;
enum bch_compression_type type;
@@ -585,13 +591,11 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
zlib_deflate_workspacesize(MAX_WBITS, DEF_MEM_LEVEL),
zlib_inflate_workspacesize(), },
{ BCH_FEATURE_zstd, BCH_COMPRESSION_TYPE_zstd,
- zstd_cctx_workspace_bound(&params.cParams),
+ c->zstd_workspace_size,
zstd_dctx_workspace_bound() },
}, *i;
bool have_compressed = false;
- c->zstd_params = params;
-
for (i = compression_types;
i < compression_types + ARRAY_SIZE(compression_types);
i++)
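The shape of the fix: compute the workspace bound once, from the parameters at the maximum compression level, and pass that same size to zstd_init_cctx() on every call regardless of the per-extent level. A minimal sketch using the kernel zstd wrappers (mempool plumbing elided; the helper name is hypothetical):

#include <linux/zstd.h>

static size_t example_zstd_workspace_bound(size_t encoded_extent_max)
{
	ZSTD_parameters params = zstd_get_params(zstd_max_clevel(),
						 encoded_extent_max);

	return zstd_cctx_workspace_bound(&params.cParams);
}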
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index 5ed66202c226..71aa5e59787b 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -356,7 +356,7 @@ void bch2_data_update_exit(struct data_update *update)
bch2_bio_free_pages_pool(c, &update->op.wbio.bio);
}
-void bch2_update_unwritten_extent(struct btree_trans *trans,
+static void bch2_update_unwritten_extent(struct btree_trans *trans,
struct data_update *update)
{
struct bch_fs *c = update->op.c;
@@ -436,7 +436,51 @@ void bch2_update_unwritten_extent(struct btree_trans *trans,
}
}
+int bch2_extent_drop_ptrs(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k,
+ struct data_update_opts data_opts)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_i *n;
+ int ret;
+
+ n = bch2_bkey_make_mut_noupdate(trans, k);
+ ret = PTR_ERR_OR_ZERO(n);
+ if (ret)
+ return ret;
+
+ while (data_opts.kill_ptrs) {
+ unsigned i = 0, drop = __fls(data_opts.kill_ptrs);
+ struct bch_extent_ptr *ptr;
+
+ bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, i++ == drop);
+ data_opts.kill_ptrs ^= 1U << drop;
+ }
+
+ /*
+ * If the new extent no longer has any pointers, bch2_extent_normalize()
+ * will do the appropriate thing with it (turning it into a
+ * KEY_TYPE_error key, or just a discard if it was a cached extent)
+ */
+ bch2_extent_normalize(c, bkey_i_to_s(n));
+
+ /*
+ * Since we're not inserting through an extent iterator
+ * (BTREE_ITER_ALL_SNAPSHOTS iterators aren't extent iterators),
+ * we aren't using the extent overwrite path to delete, we're
+ * just using the normal key deletion path:
+ */
+ if (bkey_deleted(&n->k))
+ n->k.size = 0;
+
+ return bch2_trans_relock(trans) ?:
+ bch2_trans_update(trans, iter, n, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
+ bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL);
+}
+
int bch2_data_update_init(struct btree_trans *trans,
+ struct btree_iter *iter,
struct moving_context *ctxt,
struct data_update *m,
struct write_point_specifier wp,
@@ -452,7 +496,7 @@ int bch2_data_update_init(struct btree_trans *trans,
const struct bch_extent_ptr *ptr;
unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas;
unsigned ptrs_locked = 0;
- int ret;
+ int ret = 0;
bch2_bkey_buf_init(&m->k);
bch2_bkey_buf_reassemble(&m->k, c, k);
@@ -478,6 +522,8 @@ int bch2_data_update_init(struct btree_trans *trans,
bkey_for_each_ptr(ptrs, ptr)
percpu_ref_get(&bch_dev_bkey_exists(c, ptr->dev)->ref);
+ unsigned durability_have = 0, durability_removing = 0;
+
i = 0;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
bool locked;
@@ -489,8 +535,11 @@ int bch2_data_update_init(struct btree_trans *trans,
reserve_sectors += k.k->size;
m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p);
- } else if (!p.ptr.cached) {
+ durability_removing += bch2_extent_ptr_desired_durability(c, &p);
+ } else if (!p.ptr.cached &&
+ !((1U << i) & m->data_opts.kill_ptrs)) {
bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
+ durability_have += bch2_extent_ptr_durability(c, &p);
}
/*
@@ -529,6 +578,29 @@ int bch2_data_update_init(struct btree_trans *trans,
i++;
}
+ /*
+ * If current extent durability is less than io_opts.data_replicas,
+ * we're not trying to rereplicate the extent up to data_replicas here -
+ * unless extra_replicas was specified
+ *
+ * Currently, increasing replication is an explicit operation triggered
+ * by rereplicate, so that users don't get an unexpected -ENOSPC
+ */
+ if (durability_have >= io_opts.data_replicas) {
+ m->data_opts.kill_ptrs |= m->data_opts.rewrite_ptrs;
+ m->data_opts.rewrite_ptrs = 0;
+ /* if iter == NULL, it's just a promote */
+ if (iter)
+ ret = bch2_extent_drop_ptrs(trans, iter, k, data_opts);
+ goto done;
+ }
+
+ m->op.nr_replicas = min(durability_removing, io_opts.data_replicas - durability_have) +
+ m->data_opts.extra_replicas;
+ m->op.nr_replicas_required = m->op.nr_replicas;
+
+ BUG_ON(!m->op.nr_replicas);
+
if (reserve_sectors) {
ret = bch2_disk_reservation_add(c, &m->op.res, reserve_sectors,
m->data_opts.extra_replicas
@@ -538,14 +610,11 @@ int bch2_data_update_init(struct btree_trans *trans,
goto err;
}
- m->op.nr_replicas += m->data_opts.extra_replicas;
- m->op.nr_replicas_required = m->op.nr_replicas;
-
- BUG_ON(!m->op.nr_replicas);
+ if (bkey_extent_is_unwritten(k)) {
+ bch2_update_unwritten_extent(trans, m);
+ goto done;
+ }
- /* Special handling required: */
- if (bkey_extent_is_unwritten(k))
- return -BCH_ERR_unwritten_extent_update;
return 0;
err:
i = 0;
@@ -560,6 +629,9 @@ err:
bch2_bkey_buf_exit(&m->k, c);
bch2_bio_free_pages_pool(c, &m->op.wbio.bio);
return ret;
+done:
+ bch2_data_update_exit(m);
+ return ret ?: -BCH_ERR_data_update_done;
}
void bch2_data_update_opts_normalize(struct bkey_s_c k, struct data_update_opts *opts)
diff --git a/fs/bcachefs/data_update.h b/fs/bcachefs/data_update.h
index 9dc17b9d8379..991095bbd469 100644
--- a/fs/bcachefs/data_update.h
+++ b/fs/bcachefs/data_update.h
@@ -32,9 +32,14 @@ int bch2_data_update_index_update(struct bch_write_op *);
void bch2_data_update_read_done(struct data_update *,
struct bch_extent_crc_unpacked);
+int bch2_extent_drop_ptrs(struct btree_trans *,
+ struct btree_iter *,
+ struct bkey_s_c,
+ struct data_update_opts);
+
void bch2_data_update_exit(struct data_update *);
-void bch2_update_unwritten_extent(struct btree_trans *, struct data_update *);
-int bch2_data_update_init(struct btree_trans *, struct moving_context *,
+int bch2_data_update_init(struct btree_trans *, struct btree_iter *,
+ struct moving_context *,
struct data_update *,
struct write_point_specifier,
struct bch_io_opts, struct data_update_opts,
diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h
index 68a1a96bb7ca..ae7910bf2228 100644
--- a/fs/bcachefs/errcode.h
+++ b/fs/bcachefs/errcode.h
@@ -162,7 +162,7 @@
x(BCH_ERR_fsck, fsck_repair_unimplemented) \
x(BCH_ERR_fsck, fsck_repair_impossible) \
x(0, restart_recovery) \
- x(0, unwritten_extent_update) \
+ x(0, data_update_done) \
x(EINVAL, device_state_not_allowed) \
x(EINVAL, member_info_missing) \
x(EINVAL, mismatched_block_size) \
@@ -210,6 +210,7 @@
x(BCH_ERR_invalid_sb, invalid_sb_members) \
x(BCH_ERR_invalid_sb, invalid_sb_disk_groups) \
x(BCH_ERR_invalid_sb, invalid_sb_replicas) \
+ x(BCH_ERR_invalid_sb, invalid_replicas_entry) \
x(BCH_ERR_invalid_sb, invalid_sb_journal) \
x(BCH_ERR_invalid_sb, invalid_sb_journal_seq_blacklist) \
x(BCH_ERR_invalid_sb, invalid_sb_crypt) \
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index a864de231b69..f6c92df55270 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -649,37 +649,31 @@ unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
return replicas;
}
-unsigned bch2_extent_ptr_desired_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
+static inline unsigned __extent_ptr_durability(struct bch_dev *ca, struct extent_ptr_decoded *p)
{
- struct bch_dev *ca;
-
if (p->ptr.cached)
return 0;
- ca = bch_dev_bkey_exists(c, p->ptr.dev);
-
- return ca->mi.durability +
- (p->has_ec
- ? p->ec.redundancy
- : 0);
+ return p->has_ec
+ ? p->ec.redundancy + 1
+ : ca->mi.durability;
}
-unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
+unsigned bch2_extent_ptr_desired_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
{
- struct bch_dev *ca;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, p->ptr.dev);
- if (p->ptr.cached)
- return 0;
+ return __extent_ptr_durability(ca, p);
+}
- ca = bch_dev_bkey_exists(c, p->ptr.dev);
+unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
+{
+ struct bch_dev *ca = bch_dev_bkey_exists(c, p->ptr.dev);
if (ca->mi.state == BCH_MEMBER_STATE_failed)
return 0;
- return ca->mi.durability +
- (p->has_ec
- ? p->ec.redundancy
- : 0);
+ return __extent_ptr_durability(ca, p);
}
unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
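The refactor above centralizes one rule: cached pointers never count, and an erasure-coded pointer is now worth its redundancy plus one (itself), independent of the device's configured durability. As a standalone model of the shared helper:

/* E.g. an EC pointer with redundancy 1 contributes 2; a plain pointer
 * contributes whatever durability its device advertises. */
static unsigned ptr_durability(bool cached, bool has_ec,
			       unsigned ec_redundancy, unsigned dev_durability)
{
	if (cached)
		return 0;

	return has_ec ? ec_redundancy + 1 : dev_durability;
}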
diff --git a/fs/bcachefs/fs-io-direct.c b/fs/bcachefs/fs-io-direct.c
index 5b42a76c4796..9a479e4de6b3 100644
--- a/fs/bcachefs/fs-io-direct.c
+++ b/fs/bcachefs/fs-io-direct.c
@@ -35,9 +35,9 @@ static void bio_check_or_release(struct bio *bio, bool check_dirty)
}
}
-static void bch2_dio_read_complete(struct closure *cl)
+static CLOSURE_CALLBACK(bch2_dio_read_complete)
{
- struct dio_read *dio = container_of(cl, struct dio_read, cl);
+ closure_type(dio, struct dio_read, cl);
dio->req->ki_complete(dio->req, dio->ret);
bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
@@ -325,9 +325,9 @@ static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
return 0;
}
-static void bch2_dio_write_flush_done(struct closure *cl)
+static CLOSURE_CALLBACK(bch2_dio_write_flush_done)
{
- struct dio_write *dio = container_of(cl, struct dio_write, op.cl);
+ closure_type(dio, struct dio_write, op.cl);
struct bch_fs *c = dio->op.c;
closure_debug_destroy(cl);
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index 8ef817304e4a..4d51be813509 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -1667,8 +1667,7 @@ static int bch2_show_devname(struct seq_file *seq, struct dentry *root)
if (!first)
seq_putc(seq, ':');
first = false;
- seq_puts(seq, "/dev/");
- seq_puts(seq, ca->name);
+ seq_puts(seq, ca->disk_sb.sb_name);
}
return 0;
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index a56ed553dc15..36763865facd 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -209,7 +209,7 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
bio = &op->write.op.wbio.bio;
bio_init(bio, NULL, bio->bi_inline_vecs, pages, 0);
- ret = bch2_data_update_init(trans, NULL, &op->write,
+ ret = bch2_data_update_init(trans, NULL, NULL, &op->write,
writepoint_hashed((unsigned long) current),
opts,
(struct data_update_opts) {
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index d704a8f829c8..8ede46b1e354 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -580,9 +580,9 @@ static inline void wp_update_state(struct write_point *wp, bool running)
__wp_update_state(wp, state);
}
-static void bch2_write_index(struct closure *cl)
+static CLOSURE_CALLBACK(bch2_write_index)
{
- struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
+ closure_type(op, struct bch_write_op, cl);
struct write_point *wp = op->wp;
struct workqueue_struct *wq = index_update_wq(op);
unsigned long flags;
@@ -1208,9 +1208,9 @@ static void __bch2_nocow_write_done(struct bch_write_op *op)
bch2_nocow_write_convert_unwritten(op);
}
-static void bch2_nocow_write_done(struct closure *cl)
+static CLOSURE_CALLBACK(bch2_nocow_write_done)
{
- struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
+ closure_type(op, struct bch_write_op, cl);
__bch2_nocow_write_done(op);
bch2_write_done(cl);
@@ -1363,7 +1363,7 @@ err:
op->insert_keys.top = op->insert_keys.keys;
} else if (op->flags & BCH_WRITE_SYNC) {
closure_sync(&op->cl);
- bch2_nocow_write_done(&op->cl);
+ bch2_nocow_write_done(&op->cl.work);
} else {
/*
* XXX
@@ -1566,9 +1566,9 @@ err:
* If op->discard is true, instead of inserting the data it invalidates the
* region of the cache represented by op->bio and op->inode.
*/
-void bch2_write(struct closure *cl)
+CLOSURE_CALLBACK(bch2_write)
{
- struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
+ closure_type(op, struct bch_write_op, cl);
struct bio *bio = &op->wbio.bio;
struct bch_fs *c = op->c;
unsigned data_len;
diff --git a/fs/bcachefs/io_write.h b/fs/bcachefs/io_write.h
index 9323167229ee..6c276a48f95d 100644
--- a/fs/bcachefs/io_write.h
+++ b/fs/bcachefs/io_write.h
@@ -90,8 +90,7 @@ static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
op->devs_need_flush = NULL;
}
-void bch2_write(struct closure *);
-
+CLOSURE_CALLBACK(bch2_write);
void bch2_write_point_do_index_updates(struct work_struct *);
static inline struct bch_write_bio *wbio_init(struct bio *bio)
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index 23a9b7845d11..489b34046e78 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -321,6 +321,8 @@ static int journal_entry_open(struct journal *j)
atomic64_inc(&j->seq);
journal_pin_list_init(fifo_push_ref(&j->pin), 1);
+ BUG_ON(j->pin.back - 1 != atomic64_read(&j->seq));
+
BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf);
bkey_extent_init(&buf->key);
diff --git a/fs/bcachefs/journal.h b/fs/bcachefs/journal.h
index c85d01cf4948..4c513fca5ef2 100644
--- a/fs/bcachefs/journal.h
+++ b/fs/bcachefs/journal.h
@@ -136,9 +136,7 @@ static inline u64 journal_last_seq(struct journal *j)
static inline u64 journal_cur_seq(struct journal *j)
{
- EBUG_ON(j->pin.back - 1 != atomic64_read(&j->seq));
-
- return j->pin.back - 1;
+ return atomic64_read(&j->seq);
}
static inline u64 journal_last_unwritten_seq(struct journal *j)
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 786a09285509..0f17fc5f8d68 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -547,6 +547,7 @@ static int journal_entry_data_usage_validate(struct bch_fs *c,
struct jset_entry_data_usage *u =
container_of(entry, struct jset_entry_data_usage, entry);
unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
+ struct printbuf err = PRINTBUF;
int ret = 0;
if (journal_entry_err_on(bytes < sizeof(*u) ||
@@ -555,10 +556,19 @@ static int journal_entry_data_usage_validate(struct bch_fs *c,
journal_entry_data_usage_bad_size,
"invalid journal entry usage: bad size")) {
journal_entry_null_range(entry, vstruct_next(entry));
- return ret;
+ goto out;
}
+ if (journal_entry_err_on(bch2_replicas_entry_validate(&u->r, c->disk_sb.sb, &err),
+ c, version, jset, entry,
+ journal_entry_data_usage_bad_size,
+ "invalid journal entry usage: %s", err.buf)) {
+ journal_entry_null_range(entry, vstruct_next(entry));
+ goto out;
+ }
+out:
fsck_err:
+ printbuf_exit(&err);
return ret;
}
@@ -1025,10 +1035,9 @@ next_block:
return 0;
}
-static void bch2_journal_read_device(struct closure *cl)
+static CLOSURE_CALLBACK(bch2_journal_read_device)
{
- struct journal_device *ja =
- container_of(cl, struct journal_device, read);
+ closure_type(ja, struct journal_device, read);
struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
struct bch_fs *c = ca->fs;
struct journal_list *jlist =
@@ -1520,9 +1529,9 @@ static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK);
}
-static void journal_write_done(struct closure *cl)
+static CLOSURE_CALLBACK(journal_write_done)
{
- struct journal *j = container_of(cl, struct journal, io);
+ closure_type(j, struct journal, io);
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct journal_buf *w = journal_last_unwritten_buf(j);
struct bch_replicas_padded replicas;
@@ -1638,9 +1647,9 @@ static void journal_write_endio(struct bio *bio)
percpu_ref_put(&ca->io_ref);
}
-static void do_journal_write(struct closure *cl)
+static CLOSURE_CALLBACK(do_journal_write)
{
- struct journal *j = container_of(cl, struct journal, io);
+ closure_type(j, struct journal, io);
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_dev *ca;
struct journal_buf *w = journal_last_unwritten_buf(j);
@@ -1850,9 +1859,9 @@ static int bch2_journal_write_pick_flush(struct journal *j, struct journal_buf *
return 0;
}
-void bch2_journal_write(struct closure *cl)
+CLOSURE_CALLBACK(bch2_journal_write)
{
- struct journal *j = container_of(cl, struct journal, io);
+ closure_type(j, struct journal, io);
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_dev *ca;
struct journal_buf *w = journal_last_unwritten_buf(j);
diff --git a/fs/bcachefs/journal_io.h b/fs/bcachefs/journal_io.h
index a88d097b13f1..c035e7c108e1 100644
--- a/fs/bcachefs/journal_io.h
+++ b/fs/bcachefs/journal_io.h
@@ -60,6 +60,6 @@ void bch2_journal_ptrs_to_text(struct printbuf *, struct bch_fs *,
int bch2_journal_read(struct bch_fs *, u64 *, u64 *, u64 *);
-void bch2_journal_write(struct closure *);
+CLOSURE_CALLBACK(bch2_journal_write);
#endif /* _BCACHEFS_JOURNAL_IO_H */
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index ab749bf2fcbc..54830ee0ed88 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -49,17 +49,6 @@ static void trace_move_extent_read2(struct bch_fs *c, struct bkey_s_c k)
}
}
-static void trace_move_extent_alloc_mem_fail2(struct bch_fs *c, struct bkey_s_c k)
-{
- if (trace_move_extent_alloc_mem_fail_enabled()) {
- struct printbuf buf = PRINTBUF;
-
- bch2_bkey_val_to_text(&buf, c, k);
- trace_move_extent_alloc_mem_fail(c, buf.buf);
- printbuf_exit(&buf);
- }
-}
-
struct moving_io {
struct list_head read_list;
struct list_head io_list;
@@ -163,12 +152,18 @@ void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
atomic_read(&ctxt->write_sectors) != sectors_pending);
}
+static void bch2_moving_ctxt_flush_all(struct moving_context *ctxt)
+{
+ move_ctxt_wait_event(ctxt, list_empty(&ctxt->reads));
+ bch2_trans_unlock_long(ctxt->trans);
+ closure_sync(&ctxt->cl);
+}
+
void bch2_moving_ctxt_exit(struct moving_context *ctxt)
{
struct bch_fs *c = ctxt->trans->c;
- move_ctxt_wait_event(ctxt, list_empty(&ctxt->reads));
- closure_sync(&ctxt->cl);
+ bch2_moving_ctxt_flush_all(ctxt);
EBUG_ON(atomic_read(&ctxt->write_sectors));
EBUG_ON(atomic_read(&ctxt->write_ios));
@@ -223,49 +218,6 @@ void bch2_move_stats_init(struct bch_move_stats *stats, char *name)
scnprintf(stats->name, sizeof(stats->name), "%s", name);
}
-static int bch2_extent_drop_ptrs(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c k,
- struct data_update_opts data_opts)
-{
- struct bch_fs *c = trans->c;
- struct bkey_i *n;
- int ret;
-
- n = bch2_bkey_make_mut_noupdate(trans, k);
- ret = PTR_ERR_OR_ZERO(n);
- if (ret)
- return ret;
-
- while (data_opts.kill_ptrs) {
- unsigned i = 0, drop = __fls(data_opts.kill_ptrs);
- struct bch_extent_ptr *ptr;
-
- bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, i++ == drop);
- data_opts.kill_ptrs ^= 1U << drop;
- }
-
- /*
- * If the new extent no longer has any pointers, bch2_extent_normalize()
- * will do the appropriate thing with it (turning it into a
- * KEY_TYPE_error key, or just a discard if it was a cached extent)
- */
- bch2_extent_normalize(c, bkey_i_to_s(n));
-
- /*
- * Since we're not inserting through an extent iterator
- * (BTREE_ITER_ALL_SNAPSHOTS iterators aren't extent iterators),
- * we aren't using the extent overwrite path to delete, we're
- * just using the normal key deletion path:
- */
- if (bkey_deleted(&n->k))
- n->k.size = 0;
-
- return bch2_trans_relock(trans) ?:
- bch2_trans_update(trans, iter, n, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
- bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL);
-}
-
int bch2_move_extent(struct moving_context *ctxt,
struct move_bucket_in_flight *bucket_in_flight,
struct btree_iter *iter,
@@ -335,19 +287,11 @@ int bch2_move_extent(struct moving_context *ctxt,
io->rbio.bio.bi_iter.bi_sector = bkey_start_offset(k.k);
io->rbio.bio.bi_end_io = move_read_endio;
- ret = bch2_data_update_init(trans, ctxt, &io->write, ctxt->wp,
+ ret = bch2_data_update_init(trans, iter, ctxt, &io->write, ctxt->wp,
io_opts, data_opts, iter->btree_id, k);
- if (ret && ret != -BCH_ERR_unwritten_extent_update)
+ if (ret)
goto err_free_pages;
- if (ret == -BCH_ERR_unwritten_extent_update) {
- bch2_update_unwritten_extent(trans, &io->write);
- move_free(io);
- return 0;
- }
-
- BUG_ON(ret);
-
io->write.op.end_io = move_write_done;
if (ctxt->rate)
@@ -391,8 +335,23 @@ err_free_pages:
err_free:
kfree(io);
err:
- this_cpu_inc(c->counters[BCH_COUNTER_move_extent_alloc_mem_fail]);
- trace_move_extent_alloc_mem_fail2(c, k);
+ if (ret == -BCH_ERR_data_update_done)
+ return 0;
+
+ if (bch2_err_matches(ret, EROFS) ||
+ bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ return ret;
+
+ this_cpu_inc(c->counters[BCH_COUNTER_move_extent_start_fail]);
+ if (trace_move_extent_start_fail_enabled()) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf, c, k);
+ prt_str(&buf, ": ");
+ prt_str(&buf, bch2_err_str(ret));
+ trace_move_extent_start_fail(c, buf.buf);
+ printbuf_exit(&buf);
+ }
return ret;
}
@@ -482,37 +441,30 @@ int bch2_move_get_io_opts_one(struct btree_trans *trans,
int bch2_move_ratelimit(struct moving_context *ctxt)
{
struct bch_fs *c = ctxt->trans->c;
+ bool is_kthread = current->flags & PF_KTHREAD;
u64 delay;
- if (ctxt->wait_on_copygc && !c->copygc_running) {
- bch2_trans_unlock_long(ctxt->trans);
+ if (ctxt->wait_on_copygc && c->copygc_running) {
+ bch2_moving_ctxt_flush_all(ctxt);
wait_event_killable(c->copygc_running_wq,
!c->copygc_running ||
- kthread_should_stop());
+ (is_kthread && kthread_should_stop()));
}
do {
delay = ctxt->rate ? bch2_ratelimit_delay(ctxt->rate) : 0;
-
- if (delay) {
- if (delay > HZ / 10)
- bch2_trans_unlock_long(ctxt->trans);
- else
- bch2_trans_unlock(ctxt->trans);
- set_current_state(TASK_INTERRUPTIBLE);
- }
-
- if ((current->flags & PF_KTHREAD) && kthread_should_stop()) {
- __set_current_state(TASK_RUNNING);
+ if (is_kthread && kthread_should_stop())
return 1;
- }
if (delay)
- schedule_timeout(delay);
+ move_ctxt_wait_event_timeout(ctxt,
+ freezing(current) ||
+ (is_kthread && kthread_should_stop()),
+ delay);
if (unlikely(freezing(current))) {
- move_ctxt_wait_event(ctxt, list_empty(&ctxt->reads));
+ bch2_moving_ctxt_flush_all(ctxt);
try_to_freeze();
}
} while (delay);
@@ -683,6 +635,7 @@ int __bch2_evacuate_bucket(struct moving_context *ctxt,
{
struct btree_trans *trans = ctxt->trans;
struct bch_fs *c = trans->c;
+ bool is_kthread = current->flags & PF_KTHREAD;
struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
struct btree_iter iter;
struct bkey_buf sk;
@@ -728,6 +681,9 @@ int __bch2_evacuate_bucket(struct moving_context *ctxt,
}
while (!(ret = bch2_move_ratelimit(ctxt))) {
+ if (is_kthread && kthread_should_stop())
+ break;
+
bch2_trans_begin(trans);
ret = bch2_get_next_backpointer(trans, bucket, gen,
diff --git a/fs/bcachefs/move.h b/fs/bcachefs/move.h
index 07cf9d42643b..0906aa2d1de2 100644
--- a/fs/bcachefs/move.h
+++ b/fs/bcachefs/move.h
@@ -38,6 +38,25 @@ struct moving_context {
wait_queue_head_t wait;
};
+#define move_ctxt_wait_event_timeout(_ctxt, _cond, _timeout) \
+({ \
+ int _ret = 0; \
+ while (true) { \
+ bool cond_finished = false; \
+ bch2_moving_ctxt_do_pending_writes(_ctxt); \
+ \
+ if (_cond) \
+ break; \
+ bch2_trans_unlock_long((_ctxt)->trans); \
+ _ret = __wait_event_timeout((_ctxt)->wait, \
+ bch2_moving_ctxt_next_pending_write(_ctxt) || \
+ (cond_finished = (_cond)), _timeout); \
+ if (_ret || cond_finished) \
+ break; \
+ } \
+ _ret; \
+})
+
#define move_ctxt_wait_event(_ctxt, _cond) \
do { \
bool cond_finished = false; \
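A hypothetical caller of the new timeout variant, to show the return convention: the macro keeps pumping finished writes while it sleeps, and its result is nonzero only if the condition became true before the timeout expired:

/* Wait up to a second for in-flight reads to drain. */
static bool example_drain_reads(struct moving_context *ctxt)
{
	return move_ctxt_wait_event_timeout(ctxt,
					    list_empty(&ctxt->reads),
					    HZ) != 0;
}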
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 0a0576326c5b..a84e79f79e5e 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -207,7 +207,7 @@ static int bch2_copygc(struct moving_context *ctxt,
goto err;
darray_for_each(buckets, i) {
- if (unlikely(freezing(current)))
+ if (kthread_should_stop() || freezing(current))
break;
f = move_bucket_in_flight_add(buckets_in_flight, *i);
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 9c30500ce920..770ced1c6285 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -167,6 +167,8 @@ static int bch2_journal_replay(struct bch_fs *c)
goto err;
}
+ BUG_ON(!atomic_read(&keys->ref));
+
for (i = 0; i < keys->nr; i++) {
k = keys_sorted[i];
@@ -188,6 +190,9 @@ static int bch2_journal_replay(struct bch_fs *c)
}
}
+ if (!c->opts.keep_journal)
+ bch2_journal_keys_put_initial(c);
+
replay_now_at(j, j->replay_journal_seq_end);
j->replay_journal_seq = 0;
@@ -909,10 +914,8 @@ out:
bch2_flush_fsck_errs(c);
if (!c->opts.keep_journal &&
- test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)) {
- bch2_journal_keys_free(&c->journal_keys);
- bch2_journal_entries_free(c);
- }
+ test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
+ bch2_journal_keys_put_initial(c);
kfree(clean);
if (!ret && test_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags)) {
diff --git a/fs/bcachefs/replicas.c b/fs/bcachefs/replicas.c
index 1c3ae13bfced..2008fe8bf706 100644
--- a/fs/bcachefs/replicas.c
+++ b/fs/bcachefs/replicas.c
@@ -68,6 +68,33 @@ void bch2_replicas_entry_to_text(struct printbuf *out,
prt_printf(out, "]");
}
+int bch2_replicas_entry_validate(struct bch_replicas_entry *r,
+ struct bch_sb *sb,
+ struct printbuf *err)
+{
+ if (!r->nr_devs) {
+ prt_printf(err, "no devices in entry ");
+ goto bad;
+ }
+
+ if (r->nr_required > 1 &&
+ r->nr_required >= r->nr_devs) {
+ prt_printf(err, "bad nr_required in entry ");
+ goto bad;
+ }
+
+ for (unsigned i = 0; i < r->nr_devs; i++)
+ if (!bch2_dev_exists(sb, r->devs[i])) {
+ prt_printf(err, "invalid device %u in entry ", r->devs[i]);
+ goto bad;
+ }
+
+ return 0;
+bad:
+ bch2_replicas_entry_to_text(err, r);
+ return -BCH_ERR_invalid_replicas_entry;
+}
+
void bch2_cpu_replicas_to_text(struct printbuf *out,
struct bch_replicas_cpu *r)
{
@@ -163,7 +190,8 @@ void bch2_devlist_to_replicas(struct bch_replicas_entry *e,
}
static struct bch_replicas_cpu
-cpu_replicas_add_entry(struct bch_replicas_cpu *old,
+cpu_replicas_add_entry(struct bch_fs *c,
+ struct bch_replicas_cpu *old,
struct bch_replicas_entry *new_entry)
{
unsigned i;
@@ -173,6 +201,9 @@ cpu_replicas_add_entry(struct bch_replicas_cpu *old,
replicas_entry_bytes(new_entry)),
};
+ for (i = 0; i < new_entry->nr_devs; i++)
+ BUG_ON(!bch2_dev_exists2(c, new_entry->devs[i]));
+
BUG_ON(!new_entry->data_type);
verify_replicas_entry(new_entry);
@@ -382,7 +413,7 @@ static int bch2_mark_replicas_slowpath(struct bch_fs *c,
if (c->replicas_gc.entries &&
!__replicas_has_entry(&c->replicas_gc, new_entry)) {
- new_gc = cpu_replicas_add_entry(&c->replicas_gc, new_entry);
+ new_gc = cpu_replicas_add_entry(c, &c->replicas_gc, new_entry);
if (!new_gc.entries) {
ret = -BCH_ERR_ENOMEM_cpu_replicas;
goto err;
@@ -390,7 +421,7 @@ static int bch2_mark_replicas_slowpath(struct bch_fs *c,
}
if (!__replicas_has_entry(&c->replicas, new_entry)) {
- new_r = cpu_replicas_add_entry(&c->replicas, new_entry);
+ new_r = cpu_replicas_add_entry(c, &c->replicas, new_entry);
if (!new_r.entries) {
ret = -BCH_ERR_ENOMEM_cpu_replicas;
goto err;
@@ -598,7 +629,7 @@ int bch2_replicas_set_usage(struct bch_fs *c,
if (idx < 0) {
struct bch_replicas_cpu n;
- n = cpu_replicas_add_entry(&c->replicas, r);
+ n = cpu_replicas_add_entry(c, &c->replicas, r);
if (!n.entries)
return -BCH_ERR_ENOMEM_cpu_replicas;
@@ -797,7 +828,7 @@ static int bch2_cpu_replicas_validate(struct bch_replicas_cpu *cpu_r,
struct bch_sb *sb,
struct printbuf *err)
{
- unsigned i, j;
+ unsigned i;
sort_cmp_size(cpu_r->entries,
cpu_r->nr,
@@ -808,31 +839,9 @@ static int bch2_cpu_replicas_validate(struct bch_replicas_cpu *cpu_r,
struct bch_replicas_entry *e =
cpu_replicas_entry(cpu_r, i);
- if (e->data_type >= BCH_DATA_NR) {
- prt_printf(err, "invalid data type in entry ");
- bch2_replicas_entry_to_text(err, e);
- return -BCH_ERR_invalid_sb_replicas;
- }
-
- if (!e->nr_devs) {
- prt_printf(err, "no devices in entry ");
- bch2_replicas_entry_to_text(err, e);
- return -BCH_ERR_invalid_sb_replicas;
- }
-
- if (e->nr_required > 1 &&
- e->nr_required >= e->nr_devs) {
- prt_printf(err, "bad nr_required in entry ");
- bch2_replicas_entry_to_text(err, e);
- return -BCH_ERR_invalid_sb_replicas;
- }
-
- for (j = 0; j < e->nr_devs; j++)
- if (!bch2_dev_exists(sb, e->devs[j])) {
- prt_printf(err, "invalid device %u in entry ", e->devs[j]);
- bch2_replicas_entry_to_text(err, e);
- return -BCH_ERR_invalid_sb_replicas;
- }
+ int ret = bch2_replicas_entry_validate(e, sb, err);
+ if (ret)
+ return ret;
if (i + 1 < cpu_r->nr) {
struct bch_replicas_entry *n =
diff --git a/fs/bcachefs/replicas.h b/fs/bcachefs/replicas.h
index 4887675a86f0..f70a642775d1 100644
--- a/fs/bcachefs/replicas.h
+++ b/fs/bcachefs/replicas.h
@@ -9,6 +9,8 @@
void bch2_replicas_entry_sort(struct bch_replicas_entry *);
void bch2_replicas_entry_to_text(struct printbuf *,
struct bch_replicas_entry *);
+int bch2_replicas_entry_validate(struct bch_replicas_entry *,
+ struct bch_sb *, struct printbuf *);
void bch2_cpu_replicas_to_text(struct printbuf *, struct bch_replicas_cpu *);
static inline struct bch_replicas_entry *
diff --git a/fs/bcachefs/snapshot.c b/fs/bcachefs/snapshot.c
index e9af77b384c7..5dac038f0851 100644
--- a/fs/bcachefs/snapshot.c
+++ b/fs/bcachefs/snapshot.c
@@ -959,7 +959,7 @@ static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
parent_id, id))
goto err;
- parent->v.children[i] = le32_to_cpu(child_id);
+ parent->v.children[i] = cpu_to_le32(child_id);
normalize_snapshot_child_pointers(&parent->v);
}
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index f4cad903f4d6..f3e12f7979d5 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -166,6 +166,7 @@ void bch2_free_super(struct bch_sb_handle *sb)
if (!IS_ERR_OR_NULL(sb->bdev))
blkdev_put(sb->bdev, sb->holder);
kfree(sb->holder);
+ kfree(sb->sb_name);
kfree(sb->sb);
memset(sb, 0, sizeof(*sb));
@@ -675,6 +676,10 @@ retry:
if (!sb->holder)
return -ENOMEM;
+ sb->sb_name = kstrdup(path, GFP_KERNEL);
+ if (!sb->sb_name)
+ return -ENOMEM;
+
#ifndef __KERNEL__
if (opt_get(*opts, direct_io) == false)
sb->mode |= BLK_OPEN_BUFFERED;
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 24672bb31cbe..f63474c5c5a2 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -423,6 +423,18 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
bch2_dev_allocator_add(c, ca);
bch2_recalc_capacity(c);
+ set_bit(BCH_FS_RW, &c->flags);
+ set_bit(BCH_FS_WAS_RW, &c->flags);
+
+#ifndef BCH_WRITE_REF_DEBUG
+ percpu_ref_reinit(&c->writes);
+#else
+ for (i = 0; i < BCH_WRITE_REF_NR; i++) {
+ BUG_ON(atomic_long_read(&c->writes[i]));
+ atomic_long_inc(&c->writes[i]);
+ }
+#endif
+
ret = bch2_gc_thread_start(c);
if (ret) {
bch_err(c, "error starting gc thread");
@@ -439,24 +451,16 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
goto err;
}
-#ifndef BCH_WRITE_REF_DEBUG
- percpu_ref_reinit(&c->writes);
-#else
- for (i = 0; i < BCH_WRITE_REF_NR; i++) {
- BUG_ON(atomic_long_read(&c->writes[i]));
- atomic_long_inc(&c->writes[i]);
- }
-#endif
- set_bit(BCH_FS_RW, &c->flags);
- set_bit(BCH_FS_WAS_RW, &c->flags);
-
bch2_do_discards(c);
bch2_do_invalidates(c);
bch2_do_stripe_deletes(c);
bch2_do_pending_node_rewrites(c);
return 0;
err:
- __bch2_fs_read_only(c);
+ if (test_bit(BCH_FS_RW, &c->flags))
+ bch2_fs_read_only(c);
+ else
+ __bch2_fs_read_only(c);
return ret;
}
@@ -504,8 +508,8 @@ static void __bch2_fs_free(struct bch_fs *c)
bch2_io_clock_exit(&c->io_clock[WRITE]);
bch2_io_clock_exit(&c->io_clock[READ]);
bch2_fs_compress_exit(c);
- bch2_journal_keys_free(&c->journal_keys);
- bch2_journal_entries_free(c);
+ bch2_journal_keys_put_initial(c);
+ BUG_ON(atomic_read(&c->journal_keys.ref));
bch2_fs_btree_write_buffer_exit(c);
percpu_free_rwsem(&c->mark_lock);
free_percpu(c->online_reserved);
@@ -702,6 +706,8 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
init_rwsem(&c->gc_lock);
mutex_init(&c->gc_gens_lock);
+ atomic_set(&c->journal_keys.ref, 1);
+ c->journal_keys.initial_ref_held = true;
for (i = 0; i < BCH_TIME_STAT_NR; i++)
bch2_time_stats_init(&c->times[i]);
diff --git a/fs/bcachefs/super_types.h b/fs/bcachefs/super_types.h
index 7dda4985b99f..9c1fd4ca2b10 100644
--- a/fs/bcachefs/super_types.h
+++ b/fs/bcachefs/super_types.h
@@ -5,6 +5,7 @@
struct bch_sb_handle {
struct bch_sb *sb;
struct block_device *bdev;
+ char *sb_name;
struct bio *bio;
void *holder;
size_t buffer_size;
diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h
index 7857671159b4..fd49b63562c3 100644
--- a/fs/bcachefs/trace.h
+++ b/fs/bcachefs/trace.h
@@ -754,9 +754,9 @@ TRACE_EVENT(move_extent_fail,
TP_printk("%d:%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(msg))
);
-DEFINE_EVENT(bkey, move_extent_alloc_mem_fail,
- TP_PROTO(struct bch_fs *c, const char *k),
- TP_ARGS(c, k)
+DEFINE_EVENT(bkey, move_extent_start_fail,
+ TP_PROTO(struct bch_fs *c, const char *str),
+ TP_ARGS(c, str)
);
TRACE_EVENT(move_data,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 401ea09ae4b8..bbcc3df77646 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3213,6 +3213,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
goto fail_alloc;
}
+ btrfs_info(fs_info, "first mount of filesystem %pU", disk_super->fsid);
/*
* Verify the type first, if that or the checksum value are
* corrupted, we'll find out
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 03cef28d9e37..e6230a6ffa98 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -674,8 +674,8 @@ static void end_bio_extent_readpage(struct btrfs_bio *bbio)
* the array will be skipped
*
* Return: 0 if all pages were able to be allocated;
- * -ENOMEM otherwise, and the caller is responsible for freeing all
- * non-null page pointers in the array.
+ * -ENOMEM otherwise, in which case the partially allocated pages are
+ * freed and the array slots zeroed
*/
int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array)
{
@@ -694,8 +694,13 @@ int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array)
* though alloc_pages_bulk_array() falls back to alloc_page()
* if it could not bulk-allocate. So we must be out of memory.
*/
- if (allocated == last)
+ if (allocated == last) {
+ for (int i = 0; i < allocated; i++) {
+ __free_page(page_array[i]);
+ page_array[i] = NULL;
+ }
return -ENOMEM;
+ }
memalloc_retry_wait(GFP_NOFS);
}
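With the tightened contract, callers no longer need their own partial-cleanup path; a failed call leaves the array all-NULL. A hypothetical caller after this change:

static int example_alloc_pages(struct page **pages, unsigned int nr)
{
	int ret = btrfs_alloc_page_array(nr, pages);

	if (ret)
		return ret;	/* nothing to undo: slots already NULL */

	/* ... use pages, then free them as usual ... */
	return 0;
}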
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index dfe257e1845b..4e50b62db2a8 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -4356,6 +4356,7 @@ static int _btrfs_ioctl_send(struct inode *inode, void __user *argp, bool compat
arg->clone_sources = compat_ptr(args32.clone_sources);
arg->parent_root = args32.parent_root;
arg->flags = args32.flags;
+ arg->version = args32.version;
memcpy(arg->reserved, args32.reserved,
sizeof(args32.reserved));
#else
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index 1f62976bee82..6486f0d7e993 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -794,6 +794,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
dump_ref_action(fs_info, ra);
kfree(ref);
kfree(ra);
+ kfree(re);
goto out_unlock;
} else if (be->num_refs == 0) {
btrfs_err(fs_info,
@@ -803,6 +804,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
dump_ref_action(fs_info, ra);
kfree(ref);
kfree(ra);
+ kfree(re);
goto out_unlock;
}
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 3b929f0e8f04..4e36550618e5 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -8158,7 +8158,7 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
}
sctx->send_filp = fget(arg->send_fd);
- if (!sctx->send_filp) {
+ if (!sctx->send_filp || !(sctx->send_filp->f_mode & FMODE_WRITE)) {
ret = -EBADF;
goto out;
}
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index f638dc339693..ef256b944c72 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -80,7 +80,10 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data);
static void btrfs_put_super(struct super_block *sb)
{
- close_ctree(btrfs_sb(sb));
+ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+
+ btrfs_info(fs_info, "last unmount of filesystem %pU", fs_info->fs_devices->fsid);
+ close_ctree(fs_info);
}
enum {
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 6e63816dddcb..bfc0eb5e3b7c 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1774,7 +1774,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
btrfs_release_path(path);
ret = btrfs_create_qgroup(trans, objectid);
- if (ret) {
+ if (ret && ret != -EEXIST) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index a416cbea75d1..50fdc69fdddf 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -31,6 +31,7 @@
#include "inode-item.h"
#include "dir-item.h"
#include "raid-stripe-tree.h"
+#include "extent-tree.h"
/*
* Error message should follow the following format:
@@ -1276,6 +1277,8 @@ static int check_extent_item(struct extent_buffer *leaf,
unsigned long ptr; /* Current pointer inside inline refs */
unsigned long end; /* Extent item end */
const u32 item_size = btrfs_item_size(leaf, slot);
+ u8 last_type = 0;
+ u64 last_seq = U64_MAX;
u64 flags;
u64 generation;
u64 total_refs; /* Total refs in btrfs_extent_item */
@@ -1322,6 +1325,18 @@ static int check_extent_item(struct extent_buffer *leaf,
* 2.2) Ref type specific data
* Either using btrfs_extent_inline_ref::offset, or specific
* data structure.
+ *
+ * All the above inline items should follow this order:
+ *
+ * - All btrfs_extent_inline_ref::type values should be in ascending
+ * order
+ *
+ * - Within the same type, the items should follow a descending
+ * order by their sequence number. The sequence number is
+ * determined by:
+ * * btrfs_extent_inline_ref::offset for all types other than
+ * EXTENT_DATA_REF
+ * * hash_extent_data_ref() for EXTENT_DATA_REF
*/
if (unlikely(item_size < sizeof(*ei))) {
extent_err(leaf, slot,
@@ -1403,6 +1418,7 @@ static int check_extent_item(struct extent_buffer *leaf,
struct btrfs_extent_inline_ref *iref;
struct btrfs_extent_data_ref *dref;
struct btrfs_shared_data_ref *sref;
+ u64 seq;
u64 dref_offset;
u64 inline_offset;
u8 inline_type;
@@ -1416,6 +1432,7 @@ static int check_extent_item(struct extent_buffer *leaf,
iref = (struct btrfs_extent_inline_ref *)ptr;
inline_type = btrfs_extent_inline_ref_type(leaf, iref);
inline_offset = btrfs_extent_inline_ref_offset(leaf, iref);
+ seq = inline_offset;
if (unlikely(ptr + btrfs_extent_inline_ref_size(inline_type) > end)) {
extent_err(leaf, slot,
"inline ref item overflows extent item, ptr %lu iref size %u end %lu",
@@ -1446,6 +1463,10 @@ static int check_extent_item(struct extent_buffer *leaf,
case BTRFS_EXTENT_DATA_REF_KEY:
dref = (struct btrfs_extent_data_ref *)(&iref->offset);
dref_offset = btrfs_extent_data_ref_offset(leaf, dref);
+ seq = hash_extent_data_ref(
+ btrfs_extent_data_ref_root(leaf, dref),
+ btrfs_extent_data_ref_objectid(leaf, dref),
+ btrfs_extent_data_ref_offset(leaf, dref));
if (unlikely(!IS_ALIGNED(dref_offset,
fs_info->sectorsize))) {
extent_err(leaf, slot,
@@ -1475,6 +1496,24 @@ static int check_extent_item(struct extent_buffer *leaf,
inline_type);
return -EUCLEAN;
}
+ if (inline_type < last_type) {
+ extent_err(leaf, slot,
+ "inline ref out-of-order: has type %u, prev type %u",
+ inline_type, last_type);
+ return -EUCLEAN;
+ }
+ /* Type changed; allow the sequence to start from U64_MAX again. */
+ if (inline_type > last_type)
+ last_seq = U64_MAX;
+ if (seq > last_seq) {
+ extent_err(leaf, slot,
+"inline ref out-of-order: has type %u offset %llu seq 0x%llx, prev type %u seq 0x%llx",
+ inline_type, inline_offset, seq,
+ last_type, last_seq);
+ return -EUCLEAN;
+ }
+ last_type = inline_type;
+ last_seq = seq;
ptr += btrfs_extent_inline_ref_size(inline_type);
}
/* No padding is allowed */
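A standalone model of the invariant the checker now enforces: inline ref types must ascend, and within one type the sequence numbers must descend, restarting from U64_MAX whenever the type changes (hypothetical helper; the tree-checker does this inline as shown above):

static bool inline_refs_well_ordered(const u8 *type, const u64 *seq,
				     unsigned nr)
{
	u8 last_type = 0;
	u64 last_seq = U64_MAX;

	for (unsigned i = 0; i < nr; i++) {
		if (type[i] < last_type)
			return false;
		if (type[i] > last_type)
			last_seq = U64_MAX;	/* new type, new window */
		else if (seq[i] > last_seq)
			return false;
		last_type = type[i];
		last_seq = seq[i];
	}
	return true;
}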
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index c6f16625af51..f627674b37db 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3006,15 +3006,16 @@ struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
read_unlock(&em_tree->lock);
if (!em) {
- btrfs_crit(fs_info, "unable to find logical %llu length %llu",
+ btrfs_crit(fs_info,
+ "unable to find chunk map for logical %llu length %llu",
logical, length);
return ERR_PTR(-EINVAL);
}
- if (em->start > logical || em->start + em->len < logical) {
+ if (em->start > logical || em->start + em->len <= logical) {
btrfs_crit(fs_info,
- "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
- logical, length, em->start, em->start + em->len);
+ "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
+ logical, logical + length, em->start, em->start + em->len);
free_extent_map(em);
return ERR_PTR(-EINVAL);
}
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index c45e8c2d62e1..5063434be0fc 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -84,6 +84,14 @@ int debugfs_file_get(struct dentry *dentry)
struct debugfs_fsdata *fsd;
void *d_fsd;
+ /*
+ * This could only happen if some debugfs user erroneously calls
+ * debugfs_file_get() on a dentry that isn't even a file; let
+ * them know about it.
+ */
+ if (WARN_ON(!d_is_reg(dentry)))
+ return -EINVAL;
+
d_fsd = READ_ONCE(dentry->d_fsdata);
if (!((unsigned long)d_fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)) {
fsd = d_fsd;
@@ -100,6 +108,8 @@ int debugfs_file_get(struct dentry *dentry)
kfree(fsd);
fsd = READ_ONCE(dentry->d_fsdata);
}
+ INIT_LIST_HEAD(&fsd->cancellations);
+ mutex_init(&fsd->cancellations_mtx);
}
/*
@@ -138,6 +148,86 @@ void debugfs_file_put(struct dentry *dentry)
}
EXPORT_SYMBOL_GPL(debugfs_file_put);
+/**
+ * debugfs_enter_cancellation - enter a debugfs cancellation
+ * @file: the file being accessed
+ * @cancellation: the cancellation object; the cancel callback
+ * inside of it must be initialized
+ *
+ * When a debugfs file is removed it needs to wait for all active
+ * operations to complete. However, the operation itself may need
+ * to wait for hardware or completion of some asynchronous process
+ * or similar. As such, it may need to be cancelled to avoid long
+ * waits or even deadlocks.
+ *
+ * This function can be used inside a debugfs handler that may
+ * need to be cancelled. As soon as this function is called, the
+ * cancellation's 'cancel' callback may be called, at which point
+ * the caller should proceed to call debugfs_leave_cancellation()
+ * and leave the debugfs handler function as soon as possible.
+ * Note that the 'cancel' callback is only ever called in the
+ * context of some kind of debugfs_remove().
+ *
+ * This function must be paired with debugfs_leave_cancellation().
+ */
+void debugfs_enter_cancellation(struct file *file,
+ struct debugfs_cancellation *cancellation)
+{
+ struct debugfs_fsdata *fsd;
+ struct dentry *dentry = F_DENTRY(file);
+
+ INIT_LIST_HEAD(&cancellation->list);
+
+ if (WARN_ON(!d_is_reg(dentry)))
+ return;
+
+ if (WARN_ON(!cancellation->cancel))
+ return;
+
+ fsd = READ_ONCE(dentry->d_fsdata);
+ if (WARN_ON(!fsd ||
+ ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)))
+ return;
+
+ mutex_lock(&fsd->cancellations_mtx);
+ list_add(&cancellation->list, &fsd->cancellations);
+ mutex_unlock(&fsd->cancellations_mtx);
+
+ /* if we're already removing wake it up to cancel */
+ if (d_unlinked(dentry))
+ complete(&fsd->active_users_drained);
+}
+EXPORT_SYMBOL_GPL(debugfs_enter_cancellation);
+
+/**
+ * debugfs_leave_cancellation - leave cancellation section
+ * @file: the file being accessed
+ * @cancellation: the cancellation previously registered with
+ * debugfs_enter_cancellation()
+ *
+ * See the documentation of debugfs_enter_cancellation().
+ */
+void debugfs_leave_cancellation(struct file *file,
+ struct debugfs_cancellation *cancellation)
+{
+ struct debugfs_fsdata *fsd;
+ struct dentry *dentry = F_DENTRY(file);
+
+ if (WARN_ON(!d_is_reg(dentry)))
+ return;
+
+ fsd = READ_ONCE(dentry->d_fsdata);
+ if (WARN_ON(!fsd ||
+ ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)))
+ return;
+
+ mutex_lock(&fsd->cancellations_mtx);
+ if (!list_empty(&cancellation->list))
+ list_del(&cancellation->list);
+ mutex_unlock(&fsd->cancellations_mtx);
+}
+EXPORT_SYMBOL_GPL(debugfs_leave_cancellation);
+
/*
* Only permit access to world-readable files when the kernel is locked down.
* We also need to exclude any file that has ways to write or alter it as root
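For illustration, a hypothetical handler using the new pair: it parks on a completion that either the hardware or a concurrent debugfs_remove() (via the cancel callback) can fire. The field names follow the struct debugfs_cancellation added by this series; everything else here is made up:

static void example_cancel(struct dentry *dentry, void *data)
{
	complete(data);		/* unblock the stuck handler */
}

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct debugfs_cancellation c = {
		.cancel		= example_cancel,
		.cancel_data	= &done,
	};

	debugfs_enter_cancellation(file, &c);
	wait_for_completion(&done);	/* hw irq or cancellation */
	debugfs_leave_cancellation(file, &c);

	return 0;
}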
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 5d41765e0c77..034a617cb1a5 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -236,17 +236,25 @@ static const struct super_operations debugfs_super_operations = {
static void debugfs_release_dentry(struct dentry *dentry)
{
- void *fsd = dentry->d_fsdata;
+ struct debugfs_fsdata *fsd = dentry->d_fsdata;
- if (!((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT))
- kfree(dentry->d_fsdata);
+ if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)
+ return;
+
+ /* check it wasn't a dir (no fsdata) or automount (no real_fops) */
+ if (fsd && fsd->real_fops) {
+ WARN_ON(!list_empty(&fsd->cancellations));
+ mutex_destroy(&fsd->cancellations_mtx);
+ }
+
+ kfree(fsd);
}
static struct vfsmount *debugfs_automount(struct path *path)
{
- debugfs_automount_t f;
- f = (debugfs_automount_t)path->dentry->d_fsdata;
- return f(path->dentry, d_inode(path->dentry)->i_private);
+ struct debugfs_fsdata *fsd = path->dentry->d_fsdata;
+
+ return fsd->automount(path->dentry, d_inode(path->dentry)->i_private);
}
static const struct dentry_operations debugfs_dops = {
@@ -634,13 +642,23 @@ struct dentry *debugfs_create_automount(const char *name,
void *data)
{
struct dentry *dentry = start_creating(name, parent);
+ struct debugfs_fsdata *fsd;
struct inode *inode;
if (IS_ERR(dentry))
return dentry;
+ fsd = kzalloc(sizeof(*fsd), GFP_KERNEL);
+ if (!fsd) {
+ failed_creating(dentry);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ fsd->automount = f;
+
if (!(debugfs_allow & DEBUGFS_ALLOW_API)) {
failed_creating(dentry);
+ kfree(fsd);
return ERR_PTR(-EPERM);
}
@@ -648,13 +666,14 @@ struct dentry *debugfs_create_automount(const char *name,
if (unlikely(!inode)) {
pr_err("out of free dentries, can not create automount '%s'\n",
name);
+ kfree(fsd);
return failed_creating(dentry);
}
make_empty_dir_inode(inode);
inode->i_flags |= S_AUTOMOUNT;
inode->i_private = data;
- dentry->d_fsdata = (void *)f;
+ dentry->d_fsdata = fsd;
/* directory inodes start off with i_nlink == 2 (for "." entry) */
inc_nlink(inode);
d_instantiate(dentry, inode);
@@ -731,8 +750,37 @@ static void __debugfs_file_removed(struct dentry *dentry)
fsd = READ_ONCE(dentry->d_fsdata);
if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)
return;
- if (!refcount_dec_and_test(&fsd->active_users))
+
+ /* if we hit zero, just wait for all to finish */
+ if (!refcount_dec_and_test(&fsd->active_users)) {
wait_for_completion(&fsd->active_users_drained);
+ return;
+ }
+
+ /* if we didn't hit zero, try to cancel any we can */
+ while (refcount_read(&fsd->active_users)) {
+ struct debugfs_cancellation *c;
+
+ /*
+ * Lock the cancellations. Note that the cancellations
+ * structs are meant to be on the stack, so we need to
+ * ensure we either use them here or don't touch them,
+ * and debugfs_leave_cancellation() will wait for this
+ * to be finished processing before exiting one. It may
+ * of course win and remove the cancellation, but then
+ * chances are we never even got into this bit, we only
+ * do if the refcount isn't zero already.
+ */
+ mutex_lock(&fsd->cancellations_mtx);
+ while ((c = list_first_entry_or_null(&fsd->cancellations,
+ typeof(*c), list))) {
+ list_del_init(&c->list);
+ c->cancel(dentry, c->cancel_data);
+ }
+ mutex_unlock(&fsd->cancellations_mtx);
+
+ wait_for_completion(&fsd->active_users_drained);
+ }
}
static void remove_one(struct dentry *victim)
diff --git a/fs/debugfs/internal.h b/fs/debugfs/internal.h
index 92af8ae31313..dae80c2a469e 100644
--- a/fs/debugfs/internal.h
+++ b/fs/debugfs/internal.h
@@ -7,6 +7,7 @@
#ifndef _DEBUGFS_INTERNAL_H_
#define _DEBUGFS_INTERNAL_H_
+#include <linux/list.h>
struct file_operations;
@@ -17,8 +18,18 @@ extern const struct file_operations debugfs_full_proxy_file_operations;
struct debugfs_fsdata {
const struct file_operations *real_fops;
- refcount_t active_users;
- struct completion active_users_drained;
+ union {
+ /* automount is used when real_fops is NULL */
+ debugfs_automount_t automount;
+ struct {
+ refcount_t active_users;
+ struct completion active_users_drained;
+
+ /* protect cancellations */
+ struct mutex cancellations_mtx;
+ struct list_head cancellations;
+ };
+ };
};
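The union is discriminated the same way debugfs_release_dentry() above does it — a sketch of the states a dentry's d_fsdata can now be in, mirroring the checks in the patch:

    struct debugfs_fsdata *fsd = dentry->d_fsdata;

    if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT) {
            /* low bit set: the pointer is the real fops itself */
    } else if (!fsd) {
            /* directory: no fsdata at all */
    } else if (!fsd->real_fops) {
            /* automount: only fsd->automount in the union is valid */
    } else {
            /* regular file: the refcount/completion/cancellation arm
             * of the union is live
             */
    }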
/*
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 1039e5bf90af..4ddc36f4dbd4 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -258,7 +258,6 @@ static ssize_t ext2_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
goto out_unlock;
}
- iocb->ki_pos += status;
ret += status;
endbyte = pos + status - 1;
ret2 = filemap_write_and_wait_range(inode->i_mapping, pos,
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index 2c6078a6b8ec..58ca7c936393 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -501,15 +501,38 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
down_write(&NILFS_MDT(sufile)->mi_sem);
ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
- if (!ret) {
- mark_buffer_dirty(bh);
- nilfs_mdt_mark_dirty(sufile);
- kaddr = kmap_atomic(bh->b_page);
- su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
+ if (ret)
+ goto out_sem;
+
+ kaddr = kmap_atomic(bh->b_page);
+ su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
+ if (unlikely(nilfs_segment_usage_error(su))) {
+ struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
+
+ kunmap_atomic(kaddr);
+ brelse(bh);
+ if (nilfs_segment_is_active(nilfs, segnum)) {
+ nilfs_error(sufile->i_sb,
+ "active segment %llu is erroneous",
+ (unsigned long long)segnum);
+ } else {
+ /*
+ * Segments marked erroneous are never allocated by
+ * nilfs_sufile_alloc(); only active segments, i.e.,
+ * the segments indexed by ns_segnum or ns_nextnum,
+ * can be erroneous here.
+ */
+ WARN_ON_ONCE(1);
+ }
+ ret = -EIO;
+ } else {
nilfs_segment_usage_set_dirty(su);
kunmap_atomic(kaddr);
+ mark_buffer_dirty(bh);
+ nilfs_mdt_mark_dirty(sufile);
brelse(bh);
}
+out_sem:
up_write(&NILFS_MDT(sufile)->mi_sem);
return ret;
}
@@ -536,9 +559,14 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
kaddr = kmap_atomic(bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
- WARN_ON(nilfs_segment_usage_error(su));
- if (modtime)
+ if (modtime) {
+ /*
+ * Check segusage error and set su_lastmod only when updating
+ * this entry with a valid timestamp, not for cancellation.
+ */
+ WARN_ON_ONCE(nilfs_segment_usage_error(su));
su->su_lastmod = cpu_to_le64(modtime);
+ }
su->su_nblocks = cpu_to_le32(nblocks);
kunmap_atomic(kaddr);
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 0f0667957c81..71400496ed36 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -716,7 +716,11 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
goto failed_sbh;
}
nilfs_release_super_block(nilfs);
- sb_set_blocksize(sb, blocksize);
+ if (!sb_set_blocksize(sb, blocksize)) {
+ nilfs_err(sb, "bad blocksize %d", blocksize);
+ err = -EINVAL;
+ goto out;
+ }
err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp);
if (err)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index ef2eb12906da..435b61054b5b 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1982,15 +1982,31 @@ static int pagemap_scan_test_walk(unsigned long start, unsigned long end,
struct pagemap_scan_private *p = walk->private;
struct vm_area_struct *vma = walk->vma;
unsigned long vma_category = 0;
+ bool wp_allowed = userfaultfd_wp_async(vma) &&
+ userfaultfd_wp_use_markers(vma);
- if (userfaultfd_wp_async(vma) && userfaultfd_wp_use_markers(vma))
- vma_category |= PAGE_IS_WPALLOWED;
- else if (p->arg.flags & PM_SCAN_CHECK_WPASYNC)
- return -EPERM;
+ if (!wp_allowed) {
+ /* User requested explicit failure over wp-async capability */
+ if (p->arg.flags & PM_SCAN_CHECK_WPASYNC)
+ return -EPERM;
+ /*
+ * User requires wr-protect, and allows silently skipping
+ * unsupported vmas.
+ */
+ if (p->arg.flags & PM_SCAN_WP_MATCHING)
+ return 1;
+ /*
+ * Otherwise the request doesn't involve wr-protects at all;
+ * fall through to the remaining checks and allow the vma walk.
+ */
+ }
if (vma->vm_flags & VM_PFNMAP)
return 1;
+ if (wp_allowed)
+ vma_category |= PAGE_IS_WPALLOWED;
+
if (!pagemap_scan_is_interesting_vma(vma_category, p))
return 1;
@@ -2140,7 +2156,7 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
return 0;
}
- if (!p->vec_out) {
+ if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) {
/* Fast path for performing exclusive WP */
for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
if (pte_uffd_wp(ptep_get(pte)))
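For reference, the walk above backs the PAGEMAP_SCAN ioctl; the fix lets a pure get (no PM_SCAN_WP_MATCHING) proceed even on vmas without wp-async. A hedged userspace sketch of such a get-only scan — the pm_scan_arg/page_region layout and the return convention follow the uapi as understood here, so treat the details as illustrative:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>      /* PAGEMAP_SCAN, pm_scan_arg, page_region */

    int main(void)
    {
            struct page_region regions[16];
            struct pm_scan_arg arg = {
                    .size = sizeof(arg),
                    .flags = 0,                /* get-only: no WP requested */
                    .start = 0x10000,
                    .end = 0x20000,
                    .vec = (uintptr_t)regions,
                    .vec_len = 16,
                    .return_mask = PAGE_IS_WRITTEN,
            };
            int fd = open("/proc/self/pagemap", O_RDONLY);
            long n;

            if (fd < 0)
                    return 1;
            /* returns the number of filled page_region entries (assumed) */
            n = ioctl(fd, PAGEMAP_SCAN, &arg);
            for (long i = 0; i < n; i++)
                    printf("%llx-%llx cat %llx\n",
                           (unsigned long long)regions[i].start,
                           (unsigned long long)regions[i].end,
                           (unsigned long long)regions[i].categories);
            return 0;
    }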
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index ea3a7a668b45..2131638f26d0 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -1196,32 +1196,103 @@ const struct inode_operations cifs_symlink_inode_ops = {
.listxattr = cifs_listxattr,
};
+/*
+ * Advance the EOF marker to after the source range.
+ */
+static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
+ struct cifs_tcon *src_tcon,
+ unsigned int xid, loff_t src_end)
+{
+ struct cifsFileInfo *writeable_srcfile;
+ int rc = -EINVAL;
+
+ writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
+ if (writeable_srcfile) {
+ if (src_tcon->ses->server->ops->set_file_size)
+ rc = src_tcon->ses->server->ops->set_file_size(
+ xid, src_tcon, writeable_srcfile,
+ src_inode->i_size, true /* no need to set sparse */);
+ else
+ rc = -ENOSYS;
+ cifsFileInfo_put(writeable_srcfile);
+ cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
+ }
+
+ if (rc < 0)
+ goto set_failed;
+
+ netfs_resize_file(&src_cifsi->netfs, src_end);
+ fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
+ return 0;
+
+set_failed:
+ return filemap_write_and_wait(src_inode->i_mapping);
+}
+
+/*
+ * Flush out either the folio that overlaps the beginning of a range in which
+ * pos resides or the folio that overlaps the end of a range unless that folio
+ * is entirely within the range we're going to invalidate. We extend the flush
+ * bounds to encompass the folio.
+ */
+static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
+ bool first)
+{
+ struct folio *folio;
+ unsigned long long fpos, fend;
+ pgoff_t index = pos / PAGE_SIZE;
+ size_t size;
+ int rc = 0;
+
+ folio = filemap_get_folio(inode->i_mapping, index);
+ if (IS_ERR(folio))
+ return 0;
+
+ size = folio_size(folio);
+ fpos = folio_pos(folio);
+ fend = fpos + size - 1;
+ *_fstart = min_t(unsigned long long, *_fstart, fpos);
+ *_fend = max_t(unsigned long long, *_fend, fend);
+ if ((first && pos == fpos) || (!first && pos == fend))
+ goto out;
+
+ rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
+out:
+ folio_put(folio);
+ return rc;
+}
+
static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
struct file *dst_file, loff_t destoff, loff_t len,
unsigned int remap_flags)
{
struct inode *src_inode = file_inode(src_file);
struct inode *target_inode = file_inode(dst_file);
+ struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
+ struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
struct cifsFileInfo *smb_file_src = src_file->private_data;
- struct cifsFileInfo *smb_file_target;
- struct cifs_tcon *target_tcon;
+ struct cifsFileInfo *smb_file_target = dst_file->private_data;
+ struct cifs_tcon *target_tcon, *src_tcon;
+ unsigned long long destend, fstart, fend, new_size;
unsigned int xid;
int rc;
- if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
+ if (remap_flags & REMAP_FILE_DEDUP)
+ return -EOPNOTSUPP;
+ if (remap_flags & ~REMAP_FILE_ADVISORY)
return -EINVAL;
cifs_dbg(FYI, "clone range\n");
xid = get_xid();
- if (!src_file->private_data || !dst_file->private_data) {
+ if (!smb_file_src || !smb_file_target) {
rc = -EBADF;
cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
goto out;
}
- smb_file_target = dst_file->private_data;
+ src_tcon = tlink_tcon(smb_file_src->tlink);
target_tcon = tlink_tcon(smb_file_target->tlink);
/*
@@ -1234,20 +1305,63 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
if (len == 0)
len = src_inode->i_size - off;
- cifs_dbg(FYI, "about to flush pages\n");
- /* should we flush first and last page first */
- truncate_inode_pages_range(&target_inode->i_data, destoff,
- PAGE_ALIGN(destoff + len)-1);
+ cifs_dbg(FYI, "clone range\n");
- if (target_tcon->ses->server->ops->duplicate_extents)
+ /* Flush the source buffer */
+ rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
+ off + len - 1);
+ if (rc)
+ goto unlock;
+
+ /* The server-side copy will fail if the source crosses the EOF marker.
+ * Advance the EOF marker after the flush above to the end of the range
+ * if it's short of that.
+ */
+ if (src_cifsi->netfs.remote_i_size < off + len) {
+ rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
+ if (rc < 0)
+ goto unlock;
+ }
+
+ new_size = destoff + len;
+ destend = destoff + len - 1;
+
+ /* Flush the folios at either end of the destination range to prevent
+ * accidental loss of dirty data outside of the range.
+ */
+ fstart = destoff;
+ fend = destend;
+
+ rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
+ if (rc)
+ goto unlock;
+ rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
+ if (rc)
+ goto unlock;
+
+ /* Discard all the folios that overlap the destination region. */
+ cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
+ truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
+
+ fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
+ i_size_read(target_inode), 0);
+
+ rc = -EOPNOTSUPP;
+ if (target_tcon->ses->server->ops->duplicate_extents) {
rc = target_tcon->ses->server->ops->duplicate_extents(xid,
smb_file_src, smb_file_target, off, len, destoff);
- else
- rc = -EOPNOTSUPP;
+ if (rc == 0 && new_size > i_size_read(target_inode)) {
+ truncate_setsize(target_inode, new_size);
+ netfs_resize_file(&target_cifsi->netfs, new_size);
+ fscache_resize_cookie(cifs_inode_cookie(target_inode),
+ new_size);
+ }
+ }
/* force revalidate of size and timestamps of target file now
that target is updated on the server */
CIFS_I(target_inode)->time = 0;
+unlock:
/* although unlocking in the reverse order from locking is not
strictly necessary here it is a little cleaner to be consistent */
unlock_two_nondirectories(src_inode, target_inode);
@@ -1263,10 +1377,12 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
{
struct inode *src_inode = file_inode(src_file);
struct inode *target_inode = file_inode(dst_file);
+ struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
struct cifsFileInfo *smb_file_src;
struct cifsFileInfo *smb_file_target;
struct cifs_tcon *src_tcon;
struct cifs_tcon *target_tcon;
+ unsigned long long destend, fstart, fend;
ssize_t rc;
cifs_dbg(FYI, "copychunk range\n");
@@ -1306,13 +1422,41 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
if (rc)
goto unlock;
- /* should we flush first and last page first */
- truncate_inode_pages(&target_inode->i_data, 0);
+ /* The server-side copy will fail if the source crosses the EOF marker.
+ * Advance the EOF marker after the flush above to the end of the range
+ * if it's short of that.
+ */
+ if (src_cifsi->server_eof < off + len) {
+ rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
+ if (rc < 0)
+ goto unlock;
+ }
+
+ destend = destoff + len - 1;
+
+ /* Flush the folios at either end of the destination range to prevent
+ * accidental loss of dirty data outside of the range.
+ */
+ fstart = destoff;
+ fend = destend;
+
+ rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
+ if (rc)
+ goto unlock;
+ rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
+ if (rc)
+ goto unlock;
+
+ /* Discard all the folios that overlap the destination region. */
+ truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
rc = file_modified(dst_file);
- if (!rc)
+ if (!rc) {
rc = target_tcon->ses->server->ops->copychunk_range(xid,
smb_file_src, smb_file_target, off, len, destoff);
+ if (rc > 0 && destoff + rc > i_size_read(target_inode))
+ truncate_setsize(target_inode, destoff + rc);
+ }
file_accessed(src_file);
diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
index 83ccc51a54d0..c0513fbb8a59 100644
--- a/fs/smb/client/cifspdu.h
+++ b/fs/smb/client/cifspdu.h
@@ -882,11 +882,13 @@ typedef struct smb_com_open_rsp {
__u8 OplockLevel;
__u16 Fid;
__le32 CreateAction;
- __le64 CreationTime;
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le32 FileAttributes;
+ struct_group(common_attributes,
+ __le64 CreationTime;
+ __le64 LastAccessTime;
+ __le64 LastWriteTime;
+ __le64 ChangeTime;
+ __le32 FileAttributes;
+ );
__le64 AllocationSize;
__le64 EndOfFile;
__le16 FileType;
@@ -2264,11 +2266,13 @@ typedef struct {
/* QueryFileInfo/QueryPathinfo (also for SetPath/SetFile) data buffer formats */
/******************************************************************************/
typedef struct { /* data block encoding of response to level 263 QPathInfo */
- __le64 CreationTime;
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le32 Attributes;
+ struct_group(common_attributes,
+ __le64 CreationTime;
+ __le64 LastAccessTime;
+ __le64 LastWriteTime;
+ __le64 ChangeTime;
+ __le32 Attributes;
+ );
__u32 Pad1;
__le64 AllocationSize;
__le64 EndOfFile; /* size ie offset to first free byte in file */
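Both conversions here rely on struct_group() from include/linux/stddef.h, which wraps the members in an anonymous union so they stay addressable by name while also forming a named, sizeof-able sub-struct. A simplified stand-in showing the shape (the real macro additionally handles tags and attributes):

    /* simplified illustration of the kernel's struct_group() */
    #define struct_group_demo(NAME, MEMBERS...)     \
            union {                                 \
                    struct { MEMBERS };             \
                    struct { MEMBERS } NAME;        \
            }

    struct open_rsp_demo {                  /* hypothetical mirror */
            struct_group_demo(common_attributes,
                    __le64 CreationTime;
                    __le64 LastAccessTime;
                    __le64 LastWriteTime;
                    __le64 ChangeTime;
                    __le32 FileAttributes;
            );
            __le64 AllocationSize;
    };

A grouped copy is then bounded by the compiler — memcpy(&buf->common_attributes, &rsp->common_attributes, sizeof(buf->common_attributes)) — replacing the hand-counted 36 bytes removed in the cifssmb.c hunk below.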
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
index bad91ba6c3a9..9ee348e6d106 100644
--- a/fs/smb/client/cifssmb.c
+++ b/fs/smb/client/cifssmb.c
@@ -1244,8 +1244,10 @@ openRetry:
*oplock |= CIFS_CREATE_ACTION;
if (buf) {
- /* copy from CreationTime to Attributes */
- memcpy((char *)buf, (char *)&rsp->CreationTime, 36);
+ /* copy commonly used attributes */
+ memcpy(&buf->common_attributes,
+ &rsp->common_attributes,
+ sizeof(buf->common_attributes));
/* the file_info buf is endian converted by caller */
buf->AllocationSize = rsp->AllocationSize;
buf->EndOfFile = rsp->EndOfFile;
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index f896f60c924b..9dc6dc2754c2 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -402,13 +402,7 @@ static int __cifs_reconnect(struct TCP_Server_Info *server,
spin_unlock(&server->srv_lock);
cifs_swn_reset_server_dstaddr(server);
cifs_server_unlock(server);
-
- /* increase ref count which reconnect work will drop */
- spin_lock(&cifs_tcp_ses_lock);
- server->srv_count++;
- spin_unlock(&cifs_tcp_ses_lock);
- if (mod_delayed_work(cifsiod_wq, &server->reconnect, 0))
- cifs_put_tcp_session(server, false);
+ mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
}
} while (server->tcpStatus == CifsNeedReconnect);
@@ -538,13 +532,7 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
spin_unlock(&server->srv_lock);
cifs_swn_reset_server_dstaddr(server);
cifs_server_unlock(server);
-
- /* increase ref count which reconnect work will drop */
- spin_lock(&cifs_tcp_ses_lock);
- server->srv_count++;
- spin_unlock(&cifs_tcp_ses_lock);
- if (mod_delayed_work(cifsiod_wq, &server->reconnect, 0))
- cifs_put_tcp_session(server, false);
+ mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
} while (server->tcpStatus == CifsNeedReconnect);
mutex_lock(&server->refpath_lock);
@@ -1620,25 +1608,22 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
list_del_init(&server->tcp_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
- /* For secondary channels, we pick up ref-count on the primary server */
- if (SERVER_IS_CHAN(server))
- cifs_put_tcp_session(server->primary_server, from_reconnect);
-
cancel_delayed_work_sync(&server->echo);
- if (from_reconnect) {
+ if (from_reconnect)
/*
* Avoid deadlock here: reconnect work calls
* cifs_put_tcp_session() at its end. Need to be sure
* that reconnect work does nothing with server pointer after
* that step.
*/
- if (cancel_delayed_work(&server->reconnect))
- cifs_put_tcp_session(server, from_reconnect);
- } else {
- if (cancel_delayed_work_sync(&server->reconnect))
- cifs_put_tcp_session(server, from_reconnect);
- }
+ cancel_delayed_work(&server->reconnect);
+ else
+ cancel_delayed_work_sync(&server->reconnect);
+
+ /* For secondary channels, we pick up ref-count on the primary server */
+ if (SERVER_IS_CHAN(server))
+ cifs_put_tcp_session(server->primary_server, from_reconnect);
spin_lock(&server->srv_lock);
server->tcpStatus = CifsExiting;
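All three call sites stop taking a reference before kicking the reconnect worker; instead the worker validates and takes its own reference (see the smb2_reconnect_server() hunk further down), which closes the race where mod_delayed_work() returning true triggered a bogus extra put. In outline, the pattern is (a sketch, not literal kernel code):

    /* queueing side: just kick the worker, no refcount games */
    mod_delayed_work(cifsiod_wq, &server->reconnect, 0);

    /* worker side: refuse to run against a server already torn down */
    spin_lock(&cifs_tcp_ses_lock);
    if (!server->srv_count) {
            spin_unlock(&cifs_tcp_ses_lock);
            return;
    }
    server->srv_count++;            /* dropped when the worker finishes */
    spin_unlock(&cifs_tcp_ses_lock);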
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index 47f49be69ced..09c5c0f5c96e 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -790,7 +790,7 @@ bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb,
case 0: /* SMB1 symlink */
case IO_REPARSE_TAG_SYMLINK:
case IO_REPARSE_TAG_NFS:
- fattr->cf_mode = S_IFLNK;
+ fattr->cf_mode = S_IFLNK | cifs_sb->ctx->file_mode;
fattr->cf_dtype = DT_LNK;
break;
default:
@@ -865,6 +865,8 @@ static void cifs_open_info_to_fattr(struct cifs_fattr *fattr,
out_reparse:
if (S_ISLNK(fattr->cf_mode)) {
+ if (likely(data->symlink_target))
+ fattr->cf_eof = strnlen(data->symlink_target, PATH_MAX);
fattr->cf_symlink_target = data->symlink_target;
data->symlink_target = NULL;
}
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 82ab62fd0040..fcfb6566b899 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -2836,6 +2836,8 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
usleep_range(512, 2048);
} while (++retry_count < 5);
+ if (!rc && !dfs_rsp)
+ rc = -EIO;
if (rc) {
if (!is_retryable_error(rc) && rc != -ENOENT && rc != -EOPNOTSUPP)
cifs_tcon_dbg(VFS, "%s: ioctl error: rc=%d\n", __func__, rc);
@@ -3311,6 +3313,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
struct inode *inode = file_inode(file);
struct cifsInodeInfo *cifsi = CIFS_I(inode);
struct cifsFileInfo *cfile = file->private_data;
+ unsigned long long new_size;
long rc;
unsigned int xid;
__le64 eof;
@@ -3341,10 +3344,15 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
/*
* do we also need to change the size of the file?
*/
- if (keep_size == false && i_size_read(inode) < offset + len) {
- eof = cpu_to_le64(offset + len);
+ new_size = offset + len;
+ if (keep_size == false && (unsigned long long)i_size_read(inode) < new_size) {
+ eof = cpu_to_le64(new_size);
rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, cfile->pid, &eof);
+ if (rc >= 0) {
+ truncate_setsize(inode, new_size);
+ fscache_resize_cookie(cifs_inode_cookie(inode), new_size);
+ }
}
zero_range_exit:
@@ -3739,6 +3747,9 @@ static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
if (rc < 0)
goto out_2;
+ truncate_setsize(inode, old_eof + len);
+ fscache_resize_cookie(cifs_inode_cookie(inode), i_size_read(inode));
+
rc = smb2_copychunk_range(xid, cfile, cfile, off, count, off + len);
if (rc < 0)
goto out_2;
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 2eb29fa278c3..20634fc6d4f0 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -158,7 +158,7 @@ out:
static int
smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
- struct TCP_Server_Info *server)
+ struct TCP_Server_Info *server, bool from_reconnect)
{
int rc = 0;
struct nls_table *nls_codepage = NULL;
@@ -331,7 +331,7 @@ again:
* as cifs_put_tcp_session takes a higher lock
* i.e. cifs_tcp_ses_lock
*/
- cifs_put_tcp_session(server, 1);
+ cifs_put_tcp_session(server, from_reconnect);
server->terminate = true;
cifs_signal_cifsd_for_reconnect(server, false);
@@ -499,7 +499,7 @@ static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
{
int rc;
- rc = smb2_reconnect(smb2_command, tcon, server);
+ rc = smb2_reconnect(smb2_command, tcon, server, false);
if (rc)
return rc;
@@ -3472,12 +3472,10 @@ __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
} else {
trace_smb3_close_done(xid, persistent_fid, tcon->tid,
ses->Suid);
- /*
- * Note that have to subtract 4 since struct network_open_info
- * has a final 4 byte pad that close response does not have
- */
if (pbuf)
- memcpy(pbuf, (char *)&rsp->CreationTime, sizeof(*pbuf) - 4);
+ memcpy(&pbuf->network_open_info,
+ &rsp->network_open_info,
+ sizeof(pbuf->network_open_info));
}
atomic_dec(&tcon->num_remote_opens);
@@ -3897,6 +3895,15 @@ void smb2_reconnect_server(struct work_struct *work)
int rc;
bool resched = false;
+ /* first check if ref count has reached 0, if not inc ref count */
+ spin_lock(&cifs_tcp_ses_lock);
+ if (!server->srv_count) {
+ spin_unlock(&cifs_tcp_ses_lock);
+ return;
+ }
+ server->srv_count++;
+ spin_unlock(&cifs_tcp_ses_lock);
+
/* If server is a channel, select the primary channel */
pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
@@ -3954,11 +3961,10 @@ void smb2_reconnect_server(struct work_struct *work)
}
spin_unlock(&ses->chan_lock);
}
-
spin_unlock(&cifs_tcp_ses_lock);
list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
- rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server);
+ rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server, true);
if (!rc)
cifs_reopen_persistent_handles(tcon);
else
@@ -3991,7 +3997,7 @@ void smb2_reconnect_server(struct work_struct *work)
/* now reconnect sessions for necessary channels */
list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
tcon->ses = ses;
- rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server);
+ rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server, true);
if (rc)
resched = true;
list_del_init(&ses->rlist);
@@ -4001,13 +4007,8 @@ void smb2_reconnect_server(struct work_struct *work)
done:
cifs_dbg(FYI, "Reconnecting tcons and channels finished\n");
- if (resched) {
+ if (resched)
queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
- mutex_unlock(&pserver->reconnect_mutex);
-
- /* no need to put tcp session as we're retrying */
- return;
- }
mutex_unlock(&pserver->reconnect_mutex);
/* now we can safely release srv struct */
@@ -4031,12 +4032,7 @@ SMB2_echo(struct TCP_Server_Info *server)
server->ops->need_neg(server)) {
spin_unlock(&server->srv_lock);
/* No need to send echo on newly established connections */
- spin_lock(&cifs_tcp_ses_lock);
- server->srv_count++;
- spin_unlock(&cifs_tcp_ses_lock);
- if (mod_delayed_work(cifsiod_wq, &server->reconnect, 0))
- cifs_put_tcp_session(server, false);
-
+ mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
return rc;
}
spin_unlock(&server->srv_lock);
diff --git a/fs/smb/client/smb2pdu.h b/fs/smb/client/smb2pdu.h
index 220994d0a0f7..db08194484e0 100644
--- a/fs/smb/client/smb2pdu.h
+++ b/fs/smb/client/smb2pdu.h
@@ -319,13 +319,15 @@ struct smb2_file_reparse_point_info {
} __packed;
struct smb2_file_network_open_info {
- __le64 CreationTime;
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le64 AllocationSize;
- __le64 EndOfFile;
- __le32 Attributes;
+ struct_group(network_open_info,
+ __le64 CreationTime;
+ __le64 LastAccessTime;
+ __le64 LastWriteTime;
+ __le64 ChangeTime;
+ __le64 AllocationSize;
+ __le64 EndOfFile;
+ __le32 Attributes;
+ );
__le32 Reserved;
} __packed; /* level 34 Query also similar returned in close rsp and open rsp */
diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
index 8983f45f8430..9fbaaa387dcc 100644
--- a/fs/smb/common/smb2pdu.h
+++ b/fs/smb/common/smb2pdu.h
@@ -702,13 +702,16 @@ struct smb2_close_rsp {
__le16 StructureSize; /* 60 */
__le16 Flags;
__le32 Reserved;
- __le64 CreationTime;
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le64 AllocationSize; /* Beginning of FILE_STANDARD_INFO equivalent */
- __le64 EndOfFile;
- __le32 Attributes;
+ struct_group(network_open_info,
+ __le64 CreationTime;
+ __le64 LastAccessTime;
+ __le64 LastWriteTime;
+ __le64 ChangeTime;
+ /* Beginning of FILE_STANDARD_INFO equivalent */
+ __le64 AllocationSize;
+ __le64 EndOfFile;
+ __le32 Attributes;
+ );
} __packed;
diff --git a/fs/smb/server/ksmbd_work.c b/fs/smb/server/ksmbd_work.c
index a2ed441e837a..d7c676c151e2 100644
--- a/fs/smb/server/ksmbd_work.c
+++ b/fs/smb/server/ksmbd_work.c
@@ -56,6 +56,9 @@ void ksmbd_free_work_struct(struct ksmbd_work *work)
kfree(work->tr_buf);
kvfree(work->request_buf);
kfree(work->iov);
+ if (!list_empty(&work->interim_entry))
+ list_del(&work->interim_entry);
+
if (work->async_id)
ksmbd_release_id(&work->conn->async_ida, work->async_id);
kmem_cache_free(work_cache, work);
@@ -106,7 +109,7 @@ static inline void __ksmbd_iov_pin(struct ksmbd_work *work, void *ib,
static int __ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len,
void *aux_buf, unsigned int aux_size)
{
- struct aux_read *ar;
+ struct aux_read *ar = NULL;
int need_iov_cnt = 1;
if (aux_size) {
@@ -123,8 +126,11 @@ static int __ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len,
new = krealloc(work->iov,
sizeof(struct kvec) * work->iov_alloc_cnt,
GFP_KERNEL | __GFP_ZERO);
- if (!new)
+ if (!new) {
+ kfree(ar);
+ work->iov_alloc_cnt -= 4;
return -ENOMEM;
+ }
work->iov = new;
}
diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
index 9bc0103720f5..50c68beb71d6 100644
--- a/fs/smb/server/oplock.c
+++ b/fs/smb/server/oplock.c
@@ -833,7 +833,8 @@ static int smb2_lease_break_noti(struct oplock_info *opinfo)
interim_entry);
setup_async_work(in_work, NULL, NULL);
smb2_send_interim_resp(in_work, STATUS_PENDING);
- list_del(&in_work->interim_entry);
+ list_del_init(&in_work->interim_entry);
+ release_async_work(in_work);
}
INIT_WORK(&work->work, __smb2_lease_break_noti);
ksmbd_queue_work(work);
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index 658209839729..d369b98a6e10 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -657,13 +657,9 @@ smb2_get_name(const char *src, const int maxlen, struct nls_table *local_nls)
int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg)
{
- struct smb2_hdr *rsp_hdr;
struct ksmbd_conn *conn = work->conn;
int id;
- rsp_hdr = ksmbd_resp_buf_next(work);
- rsp_hdr->Flags |= SMB2_FLAGS_ASYNC_COMMAND;
-
id = ksmbd_acquire_async_msg_id(&conn->async_ida);
if (id < 0) {
pr_err("Failed to alloc async message id\n");
@@ -671,7 +667,6 @@ int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg)
}
work->asynchronous = true;
work->async_id = id;
- rsp_hdr->Id.AsyncId = cpu_to_le64(id);
ksmbd_debug(SMB,
"Send interim Response to inform async request id : %d\n",
@@ -723,6 +718,8 @@ void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status)
__SMB2_HEADER_STRUCTURE_SIZE);
rsp_hdr = smb2_get_msg(in_work->response_buf);
+ rsp_hdr->Flags |= SMB2_FLAGS_ASYNC_COMMAND;
+ rsp_hdr->Id.AsyncId = cpu_to_le64(work->async_id);
smb2_set_err_rsp(in_work);
rsp_hdr->Status = status;
@@ -2380,7 +2377,8 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
rc = 0;
} else {
rc = ksmbd_vfs_setxattr(idmap, path, attr_name, value,
- le16_to_cpu(eabuf->EaValueLength), 0);
+ le16_to_cpu(eabuf->EaValueLength),
+ 0, true);
if (rc < 0) {
ksmbd_debug(SMB,
"ksmbd_vfs_setxattr is failed(%d)\n",
@@ -2443,7 +2441,7 @@ static noinline int smb2_set_stream_name_xattr(const struct path *path,
return -EBADF;
}
- rc = ksmbd_vfs_setxattr(idmap, path, xattr_stream_name, NULL, 0, 0);
+ rc = ksmbd_vfs_setxattr(idmap, path, xattr_stream_name, NULL, 0, 0, false);
if (rc < 0)
pr_err("Failed to store XATTR stream name :%d\n", rc);
return 0;
@@ -2518,7 +2516,7 @@ static void smb2_new_xattrs(struct ksmbd_tree_connect *tcon, const struct path *
da.flags = XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME |
XATTR_DOSINFO_ITIME;
- rc = ksmbd_vfs_set_dos_attrib_xattr(mnt_idmap(path->mnt), path, &da);
+ rc = ksmbd_vfs_set_dos_attrib_xattr(mnt_idmap(path->mnt), path, &da, false);
if (rc)
ksmbd_debug(SMB, "failed to store file attribute into xattr\n");
}
@@ -2608,7 +2606,7 @@ static int smb2_create_sd_buffer(struct ksmbd_work *work,
sizeof(struct create_sd_buf_req))
return -EINVAL;
return set_info_sec(work->conn, work->tcon, path, &sd_buf->ntsd,
- le32_to_cpu(sd_buf->ccontext.DataLength), true);
+ le32_to_cpu(sd_buf->ccontext.DataLength), true, false);
}
static void ksmbd_acls_fattr(struct smb_fattr *fattr,
@@ -2690,7 +2688,7 @@ int smb2_open(struct ksmbd_work *work)
*(char *)req->Buffer == '\\') {
pr_err("not allow directory name included leading slash\n");
rc = -EINVAL;
- goto err_out1;
+ goto err_out2;
}
name = smb2_get_name(req->Buffer,
@@ -2701,7 +2699,7 @@ int smb2_open(struct ksmbd_work *work)
if (rc != -ENOMEM)
rc = -ENOENT;
name = NULL;
- goto err_out1;
+ goto err_out2;
}
ksmbd_debug(SMB, "converted name = %s\n", name);
@@ -2709,28 +2707,28 @@ int smb2_open(struct ksmbd_work *work)
if (!test_share_config_flag(work->tcon->share_conf,
KSMBD_SHARE_FLAG_STREAMS)) {
rc = -EBADF;
- goto err_out1;
+ goto err_out2;
}
rc = parse_stream_name(name, &stream_name, &s_type);
if (rc < 0)
- goto err_out1;
+ goto err_out2;
}
rc = ksmbd_validate_filename(name);
if (rc < 0)
- goto err_out1;
+ goto err_out2;
if (ksmbd_share_veto_filename(share, name)) {
rc = -ENOENT;
ksmbd_debug(SMB, "Reject open(), vetoed file: %s\n",
name);
- goto err_out1;
+ goto err_out2;
}
} else {
name = kstrdup("", GFP_KERNEL);
if (!name) {
rc = -ENOMEM;
- goto err_out1;
+ goto err_out2;
}
}
@@ -2743,14 +2741,14 @@ int smb2_open(struct ksmbd_work *work)
le32_to_cpu(req->ImpersonationLevel));
rc = -EIO;
rsp->hdr.Status = STATUS_BAD_IMPERSONATION_LEVEL;
- goto err_out1;
+ goto err_out2;
}
if (req->CreateOptions && !(req->CreateOptions & CREATE_OPTIONS_MASK_LE)) {
pr_err("Invalid create options : 0x%x\n",
le32_to_cpu(req->CreateOptions));
rc = -EINVAL;
- goto err_out1;
+ goto err_out2;
} else {
if (req->CreateOptions & FILE_SEQUENTIAL_ONLY_LE &&
req->CreateOptions & FILE_RANDOM_ACCESS_LE)
@@ -2760,13 +2758,13 @@ int smb2_open(struct ksmbd_work *work)
(FILE_OPEN_BY_FILE_ID_LE | CREATE_TREE_CONNECTION |
FILE_RESERVE_OPFILTER_LE)) {
rc = -EOPNOTSUPP;
- goto err_out1;
+ goto err_out2;
}
if (req->CreateOptions & FILE_DIRECTORY_FILE_LE) {
if (req->CreateOptions & FILE_NON_DIRECTORY_FILE_LE) {
rc = -EINVAL;
- goto err_out1;
+ goto err_out2;
} else if (req->CreateOptions & FILE_NO_COMPRESSION_LE) {
req->CreateOptions = ~(FILE_NO_COMPRESSION_LE);
}
@@ -2778,21 +2776,21 @@ int smb2_open(struct ksmbd_work *work)
pr_err("Invalid create disposition : 0x%x\n",
le32_to_cpu(req->CreateDisposition));
rc = -EINVAL;
- goto err_out1;
+ goto err_out2;
}
if (!(req->DesiredAccess & DESIRED_ACCESS_MASK)) {
pr_err("Invalid desired access : 0x%x\n",
le32_to_cpu(req->DesiredAccess));
rc = -EACCES;
- goto err_out1;
+ goto err_out2;
}
if (req->FileAttributes && !(req->FileAttributes & FILE_ATTRIBUTE_MASK_LE)) {
pr_err("Invalid file attribute : 0x%x\n",
le32_to_cpu(req->FileAttributes));
rc = -EINVAL;
- goto err_out1;
+ goto err_out2;
}
if (req->CreateContextsOffset) {
@@ -2800,19 +2798,19 @@ int smb2_open(struct ksmbd_work *work)
context = smb2_find_context_vals(req, SMB2_CREATE_EA_BUFFER, 4);
if (IS_ERR(context)) {
rc = PTR_ERR(context);
- goto err_out1;
+ goto err_out2;
} else if (context) {
ea_buf = (struct create_ea_buf_req *)context;
if (le16_to_cpu(context->DataOffset) +
le32_to_cpu(context->DataLength) <
sizeof(struct create_ea_buf_req)) {
rc = -EINVAL;
- goto err_out1;
+ goto err_out2;
}
if (req->CreateOptions & FILE_NO_EA_KNOWLEDGE_LE) {
rsp->hdr.Status = STATUS_ACCESS_DENIED;
rc = -EACCES;
- goto err_out1;
+ goto err_out2;
}
}
@@ -2820,7 +2818,7 @@ int smb2_open(struct ksmbd_work *work)
SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST, 4);
if (IS_ERR(context)) {
rc = PTR_ERR(context);
- goto err_out1;
+ goto err_out2;
} else if (context) {
ksmbd_debug(SMB,
"get query maximal access context\n");
@@ -2831,11 +2829,11 @@ int smb2_open(struct ksmbd_work *work)
SMB2_CREATE_TIMEWARP_REQUEST, 4);
if (IS_ERR(context)) {
rc = PTR_ERR(context);
- goto err_out1;
+ goto err_out2;
} else if (context) {
ksmbd_debug(SMB, "get timewarp context\n");
rc = -EBADF;
- goto err_out1;
+ goto err_out2;
}
if (tcon->posix_extensions) {
@@ -2843,7 +2841,7 @@ int smb2_open(struct ksmbd_work *work)
SMB2_CREATE_TAG_POSIX, 16);
if (IS_ERR(context)) {
rc = PTR_ERR(context);
- goto err_out1;
+ goto err_out2;
} else if (context) {
struct create_posix *posix =
(struct create_posix *)context;
@@ -2851,7 +2849,7 @@ int smb2_open(struct ksmbd_work *work)
le32_to_cpu(context->DataLength) <
sizeof(struct create_posix) - 4) {
rc = -EINVAL;
- goto err_out1;
+ goto err_out2;
}
ksmbd_debug(SMB, "get posix context\n");
@@ -2863,7 +2861,7 @@ int smb2_open(struct ksmbd_work *work)
if (ksmbd_override_fsids(work)) {
rc = -ENOMEM;
- goto err_out1;
+ goto err_out2;
}
rc = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
@@ -3038,7 +3036,7 @@ int smb2_open(struct ksmbd_work *work)
}
}
- rc = ksmbd_query_inode_status(d_inode(path.dentry->d_parent));
+ rc = ksmbd_query_inode_status(path.dentry->d_parent);
if (rc == KSMBD_INODE_STATUS_PENDING_DELETE) {
rc = -EBUSY;
goto err_out;
@@ -3152,7 +3150,8 @@ int smb2_open(struct ksmbd_work *work)
idmap,
&path,
pntsd,
- pntsd_size);
+ pntsd_size,
+ false);
kfree(pntsd);
if (rc)
pr_err("failed to store ntacl in xattr : %d\n",
@@ -3175,11 +3174,6 @@ int smb2_open(struct ksmbd_work *work)
fp->attrib_only = !(req->DesiredAccess & ~(FILE_READ_ATTRIBUTES_LE |
FILE_WRITE_ATTRIBUTES_LE | FILE_SYNCHRONIZE_LE));
- if (!S_ISDIR(file_inode(filp)->i_mode) && open_flags & O_TRUNC &&
- !fp->attrib_only && !stream_name) {
- smb_break_all_oplock(work, fp);
- need_truncate = 1;
- }
/* fp should be searchable through ksmbd_inode.m_fp_list
* after daccess, saccess, attrib_only, and stream are
@@ -3195,13 +3189,39 @@ int smb2_open(struct ksmbd_work *work)
goto err_out;
}
+ rc = ksmbd_vfs_getattr(&path, &stat);
+ if (rc)
+ goto err_out;
+
+ if (stat.result_mask & STATX_BTIME)
+ fp->create_time = ksmbd_UnixTimeToNT(stat.btime);
+ else
+ fp->create_time = ksmbd_UnixTimeToNT(stat.ctime);
+ if (req->FileAttributes || fp->f_ci->m_fattr == 0)
+ fp->f_ci->m_fattr =
+ cpu_to_le32(smb2_get_dos_mode(&stat, le32_to_cpu(req->FileAttributes)));
+
+ if (!created)
+ smb2_update_xattrs(tcon, &path, fp);
+ else
+ smb2_new_xattrs(tcon, &path, fp);
+
+ if (file_present || created)
+ ksmbd_vfs_kern_path_unlock(&parent_path, &path);
+
+ if (!S_ISDIR(file_inode(filp)->i_mode) && open_flags & O_TRUNC &&
+ !fp->attrib_only && !stream_name) {
+ smb_break_all_oplock(work, fp);
+ need_truncate = 1;
+ }
+
share_ret = ksmbd_smb_check_shared_mode(fp->filp, fp);
if (!test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_OPLOCKS) ||
(req_op_level == SMB2_OPLOCK_LEVEL_LEASE &&
!(conn->vals->capabilities & SMB2_GLOBAL_CAP_LEASING))) {
if (share_ret < 0 && !S_ISDIR(file_inode(fp->filp)->i_mode)) {
rc = share_ret;
- goto err_out;
+ goto err_out1;
}
} else {
if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE) {
@@ -3211,7 +3231,7 @@ int smb2_open(struct ksmbd_work *work)
name, req_op_level, lc->req_state);
rc = find_same_lease_key(sess, fp->f_ci, lc);
if (rc)
- goto err_out;
+ goto err_out1;
} else if (open_flags == O_RDONLY &&
(req_op_level == SMB2_OPLOCK_LEVEL_BATCH ||
req_op_level == SMB2_OPLOCK_LEVEL_EXCLUSIVE))
@@ -3222,16 +3242,16 @@ int smb2_open(struct ksmbd_work *work)
le32_to_cpu(req->hdr.Id.SyncId.TreeId),
lc, share_ret);
if (rc < 0)
- goto err_out;
+ goto err_out1;
}
if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE)
ksmbd_fd_set_delete_on_close(fp, file_info);
if (need_truncate) {
- rc = smb2_create_truncate(&path);
+ rc = smb2_create_truncate(&fp->filp->f_path);
if (rc)
- goto err_out;
+ goto err_out1;
}
if (req->CreateContextsOffset) {
@@ -3241,7 +3261,7 @@ int smb2_open(struct ksmbd_work *work)
SMB2_CREATE_ALLOCATION_SIZE, 4);
if (IS_ERR(az_req)) {
rc = PTR_ERR(az_req);
- goto err_out;
+ goto err_out1;
} else if (az_req) {
loff_t alloc_size;
int err;
@@ -3250,7 +3270,7 @@ int smb2_open(struct ksmbd_work *work)
le32_to_cpu(az_req->ccontext.DataLength) <
sizeof(struct create_alloc_size_req)) {
rc = -EINVAL;
- goto err_out;
+ goto err_out1;
}
alloc_size = le64_to_cpu(az_req->AllocationSize);
ksmbd_debug(SMB,
@@ -3268,30 +3288,13 @@ int smb2_open(struct ksmbd_work *work)
context = smb2_find_context_vals(req, SMB2_CREATE_QUERY_ON_DISK_ID, 4);
if (IS_ERR(context)) {
rc = PTR_ERR(context);
- goto err_out;
+ goto err_out1;
} else if (context) {
ksmbd_debug(SMB, "get query on disk id context\n");
query_disk_id = 1;
}
}
- rc = ksmbd_vfs_getattr(&path, &stat);
- if (rc)
- goto err_out;
-
- if (stat.result_mask & STATX_BTIME)
- fp->create_time = ksmbd_UnixTimeToNT(stat.btime);
- else
- fp->create_time = ksmbd_UnixTimeToNT(stat.ctime);
- if (req->FileAttributes || fp->f_ci->m_fattr == 0)
- fp->f_ci->m_fattr =
- cpu_to_le32(smb2_get_dos_mode(&stat, le32_to_cpu(req->FileAttributes)));
-
- if (!created)
- smb2_update_xattrs(tcon, &path, fp);
- else
- smb2_new_xattrs(tcon, &path, fp);
-
memcpy(fp->client_guid, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE);
rsp->StructureSize = cpu_to_le16(89);
@@ -3398,13 +3401,13 @@ int smb2_open(struct ksmbd_work *work)
}
err_out:
- if (file_present || created) {
- inode_unlock(d_inode(parent_path.dentry));
- path_put(&path);
- path_put(&parent_path);
- }
- ksmbd_revert_fsids(work);
+ if (rc && (file_present || created))
+ ksmbd_vfs_kern_path_unlock(&parent_path, &path);
+
err_out1:
+ ksmbd_revert_fsids(work);
+
+err_out2:
if (!rc) {
ksmbd_update_fstate(&work->sess->file_table, fp, FP_INITED);
rc = ksmbd_iov_pin_rsp(work, (void *)rsp, iov_len);
@@ -5537,7 +5540,7 @@ static int smb2_rename(struct ksmbd_work *work,
rc = ksmbd_vfs_setxattr(file_mnt_idmap(fp->filp),
&fp->filp->f_path,
xattr_stream_name,
- NULL, 0, 0);
+ NULL, 0, 0, true);
if (rc < 0) {
pr_err("failed to store stream name in xattr: %d\n",
rc);
@@ -5630,11 +5633,9 @@ static int smb2_create_link(struct ksmbd_work *work,
if (rc)
rc = -EINVAL;
out:
- if (file_present) {
- inode_unlock(d_inode(parent_path.dentry));
- path_put(&path);
- path_put(&parent_path);
- }
+ if (file_present)
+ ksmbd_vfs_kern_path_unlock(&parent_path, &path);
+
if (!IS_ERR(link_name))
kfree(link_name);
kfree(pathname);
@@ -5701,7 +5702,8 @@ static int set_file_basic_info(struct ksmbd_file *fp,
da.flags = XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME |
XATTR_DOSINFO_ITIME;
- rc = ksmbd_vfs_set_dos_attrib_xattr(idmap, &filp->f_path, &da);
+ rc = ksmbd_vfs_set_dos_attrib_xattr(idmap, &filp->f_path, &da,
+ true);
if (rc)
ksmbd_debug(SMB,
"failed to restore file attribute in EA\n");
@@ -6013,7 +6015,7 @@ static int smb2_set_info_sec(struct ksmbd_file *fp, int addition_info,
fp->saccess |= FILE_SHARE_DELETE_LE;
return set_info_sec(fp->conn, fp->tcon, &fp->filp->f_path, pntsd,
- buf_len, false);
+ buf_len, false, true);
}
/**
@@ -7582,7 +7584,8 @@ static inline int fsctl_set_sparse(struct ksmbd_work *work, u64 id,
da.attr = le32_to_cpu(fp->f_ci->m_fattr);
ret = ksmbd_vfs_set_dos_attrib_xattr(idmap,
- &fp->filp->f_path, &da);
+ &fp->filp->f_path,
+ &da, true);
if (ret)
fp->f_ci->m_fattr = old_fattr;
}
@@ -8231,7 +8234,6 @@ static void smb21_lease_break_ack(struct ksmbd_work *work)
return;
err_out:
- opinfo->op_state = OPLOCK_STATE_NONE;
wake_up_interruptible_all(&opinfo->oplock_q);
atomic_dec(&opinfo->breaking_cnt);
wake_up_interruptible_all(&opinfo->oplock_brk);
diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c
index 51b8bfab7481..1164365533f0 100644
--- a/fs/smb/server/smbacl.c
+++ b/fs/smb/server/smbacl.c
@@ -1185,7 +1185,7 @@ pass:
pntsd_size += sizeof(struct smb_acl) + nt_size;
}
- ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, pntsd_size);
+ ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, pntsd_size, false);
kfree(pntsd);
}
@@ -1377,7 +1377,7 @@ err_out:
int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
const struct path *path, struct smb_ntsd *pntsd, int ntsd_len,
- bool type_check)
+ bool type_check, bool get_write)
{
int rc;
struct smb_fattr fattr = {{0}};
@@ -1437,7 +1437,8 @@ int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
if (test_share_config_flag(tcon->share_conf, KSMBD_SHARE_FLAG_ACL_XATTR)) {
/* Update WinACL in xattr */
ksmbd_vfs_remove_sd_xattrs(idmap, path);
- ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, ntsd_len);
+ ksmbd_vfs_set_sd_xattr(conn, idmap, path, pntsd, ntsd_len,
+ get_write);
}
out:
diff --git a/fs/smb/server/smbacl.h b/fs/smb/server/smbacl.h
index 49a8c292bd2e..2b52861707d8 100644
--- a/fs/smb/server/smbacl.h
+++ b/fs/smb/server/smbacl.h
@@ -207,7 +207,7 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
__le32 *pdaccess, int uid);
int set_info_sec(struct ksmbd_conn *conn, struct ksmbd_tree_connect *tcon,
const struct path *path, struct smb_ntsd *pntsd, int ntsd_len,
- bool type_check);
+ bool type_check, bool get_write);
void id_to_sid(unsigned int cid, uint sidtype, struct smb_sid *ssid);
void ksmbd_init_domain(u32 *sub_auth);
diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
index c53dea5598fc..9091dcd7a310 100644
--- a/fs/smb/server/vfs.c
+++ b/fs/smb/server/vfs.c
@@ -97,6 +97,13 @@ static int ksmbd_vfs_path_lookup_locked(struct ksmbd_share_config *share_conf,
return -ENOENT;
}
+ err = mnt_want_write(parent_path->mnt);
+ if (err) {
+ path_put(parent_path);
+ putname(filename);
+ return -ENOENT;
+ }
+
inode_lock_nested(parent_path->dentry->d_inode, I_MUTEX_PARENT);
d = lookup_one_qstr_excl(&last, parent_path->dentry, 0);
if (IS_ERR(d))
@@ -123,6 +130,7 @@ static int ksmbd_vfs_path_lookup_locked(struct ksmbd_share_config *share_conf,
err_out:
inode_unlock(d_inode(parent_path->dentry));
+ mnt_drop_write(parent_path->mnt);
path_put(parent_path);
putname(filename);
return -ENOENT;
@@ -451,7 +459,8 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos,
fp->stream.name,
(void *)stream_buf,
size,
- 0);
+ 0,
+ true);
if (err < 0)
goto out;
@@ -593,10 +602,6 @@ int ksmbd_vfs_remove_file(struct ksmbd_work *work, const struct path *path)
goto out_err;
}
- err = mnt_want_write(path->mnt);
- if (err)
- goto out_err;
-
idmap = mnt_idmap(path->mnt);
if (S_ISDIR(d_inode(path->dentry)->i_mode)) {
err = vfs_rmdir(idmap, d_inode(parent), path->dentry);
@@ -607,7 +612,6 @@ int ksmbd_vfs_remove_file(struct ksmbd_work *work, const struct path *path)
if (err)
ksmbd_debug(VFS, "unlink failed, err %d\n", err);
}
- mnt_drop_write(path->mnt);
out_err:
ksmbd_revert_fsids(work);
@@ -715,7 +719,7 @@ retry:
goto out3;
}
- parent_fp = ksmbd_lookup_fd_inode(d_inode(old_child->d_parent));
+ parent_fp = ksmbd_lookup_fd_inode(old_child->d_parent);
if (parent_fp) {
if (parent_fp->daccess & FILE_DELETE_LE) {
pr_err("parent dir is opened with delete access\n");
@@ -907,18 +911,22 @@ ssize_t ksmbd_vfs_getxattr(struct mnt_idmap *idmap,
* @attr_value: xattr value to set
* @attr_size: size of xattr value
* @flags: destination buffer length
+ * @get_write: get write access to a mount
*
* Return: 0 on success, otherwise error
*/
int ksmbd_vfs_setxattr(struct mnt_idmap *idmap,
const struct path *path, const char *attr_name,
- void *attr_value, size_t attr_size, int flags)
+ void *attr_value, size_t attr_size, int flags,
+ bool get_write)
{
int err;
- err = mnt_want_write(path->mnt);
- if (err)
- return err;
+ if (get_write) {
+ err = mnt_want_write(path->mnt);
+ if (err)
+ return err;
+ }
err = vfs_setxattr(idmap,
path->dentry,
@@ -928,7 +936,8 @@ int ksmbd_vfs_setxattr(struct mnt_idmap *idmap,
flags);
if (err)
ksmbd_debug(VFS, "setxattr failed, err %d\n", err);
- mnt_drop_write(path->mnt);
+ if (get_write)
+ mnt_drop_write(path->mnt);
return err;
}
@@ -1252,6 +1261,13 @@ out1:
}
if (!err) {
+ err = mnt_want_write(parent_path->mnt);
+ if (err) {
+ path_put(path);
+ path_put(parent_path);
+ return err;
+ }
+
err = ksmbd_vfs_lock_parent(parent_path->dentry, path->dentry);
if (err) {
path_put(path);
@@ -1261,6 +1277,14 @@ out1:
return err;
}
+void ksmbd_vfs_kern_path_unlock(struct path *parent_path, struct path *path)
+{
+ inode_unlock(d_inode(parent_path->dentry));
+ mnt_drop_write(parent_path->mnt);
+ path_put(path);
+ path_put(parent_path);
+}
+
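The new helper gives callers a single teardown for lookups made with ksmbd_vfs_kern_path_locked(), which now also pins the mount for writing. The intended pairing, sketched with error handling trimmed:

    struct path parent_path, path;
    int err;

    err = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
                                     &parent_path, &path, false);
    if (err)
            return err;

    /* ... modify the target while the parent dir is locked and the
     * mount write count is held ...
     */

    /* drops the parent inode lock, mnt_drop_write() and both path refs */
    ksmbd_vfs_kern_path_unlock(&parent_path, &path);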
struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
const char *name,
unsigned int flags,
@@ -1415,7 +1439,8 @@ out:
int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn,
struct mnt_idmap *idmap,
const struct path *path,
- struct smb_ntsd *pntsd, int len)
+ struct smb_ntsd *pntsd, int len,
+ bool get_write)
{
int rc;
struct ndr sd_ndr = {0}, acl_ndr = {0};
@@ -1475,7 +1500,7 @@ int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn,
rc = ksmbd_vfs_setxattr(idmap, path,
XATTR_NAME_SD, sd_ndr.data,
- sd_ndr.offset, 0);
+ sd_ndr.offset, 0, get_write);
if (rc < 0)
pr_err("Failed to store XATTR ntacl :%d\n", rc);
@@ -1564,7 +1589,8 @@ free_n_data:
int ksmbd_vfs_set_dos_attrib_xattr(struct mnt_idmap *idmap,
const struct path *path,
- struct xattr_dos_attrib *da)
+ struct xattr_dos_attrib *da,
+ bool get_write)
{
struct ndr n;
int err;
@@ -1574,7 +1600,7 @@ int ksmbd_vfs_set_dos_attrib_xattr(struct mnt_idmap *idmap,
return err;
err = ksmbd_vfs_setxattr(idmap, path, XATTR_NAME_DOS_ATTRIBUTE,
- (void *)n.data, n.offset, 0);
+ (void *)n.data, n.offset, 0, get_write);
if (err)
ksmbd_debug(SMB, "failed to store dos attribute in xattr\n");
kfree(n.data);
@@ -1846,10 +1872,6 @@ int ksmbd_vfs_set_init_posix_acl(struct mnt_idmap *idmap,
}
posix_state_to_acl(&acl_state, acls->a_entries);
- rc = mnt_want_write(path->mnt);
- if (rc)
- goto out_err;
-
rc = set_posix_acl(idmap, dentry, ACL_TYPE_ACCESS, acls);
if (rc < 0)
ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_ACCESS) failed, rc : %d\n",
@@ -1861,9 +1883,7 @@ int ksmbd_vfs_set_init_posix_acl(struct mnt_idmap *idmap,
ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n",
rc);
}
- mnt_drop_write(path->mnt);
-out_err:
free_acl_state(&acl_state);
posix_acl_release(acls);
return rc;
@@ -1893,10 +1913,6 @@ int ksmbd_vfs_inherit_posix_acl(struct mnt_idmap *idmap,
}
}
- rc = mnt_want_write(path->mnt);
- if (rc)
- goto out_err;
-
rc = set_posix_acl(idmap, dentry, ACL_TYPE_ACCESS, acls);
if (rc < 0)
ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_ACCESS) failed, rc : %d\n",
@@ -1908,9 +1924,7 @@ int ksmbd_vfs_inherit_posix_acl(struct mnt_idmap *idmap,
ksmbd_debug(SMB, "Set posix acl(ACL_TYPE_DEFAULT) failed, rc : %d\n",
rc);
}
- mnt_drop_write(path->mnt);
-out_err:
posix_acl_release(acls);
return rc;
}
diff --git a/fs/smb/server/vfs.h b/fs/smb/server/vfs.h
index 00968081856e..cfe1c8092f23 100644
--- a/fs/smb/server/vfs.h
+++ b/fs/smb/server/vfs.h
@@ -109,7 +109,8 @@ ssize_t ksmbd_vfs_casexattr_len(struct mnt_idmap *idmap,
int attr_name_len);
int ksmbd_vfs_setxattr(struct mnt_idmap *idmap,
const struct path *path, const char *attr_name,
- void *attr_value, size_t attr_size, int flags);
+ void *attr_value, size_t attr_size, int flags,
+ bool get_write);
int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name,
size_t *xattr_stream_name_size, int s_type);
int ksmbd_vfs_remove_xattr(struct mnt_idmap *idmap,
@@ -117,6 +118,7 @@ int ksmbd_vfs_remove_xattr(struct mnt_idmap *idmap,
int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
unsigned int flags, struct path *parent_path,
struct path *path, bool caseless);
+void ksmbd_vfs_kern_path_unlock(struct path *parent_path, struct path *path);
struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
const char *name,
unsigned int flags,
@@ -144,14 +146,16 @@ int ksmbd_vfs_remove_sd_xattrs(struct mnt_idmap *idmap, const struct path *path)
int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn,
struct mnt_idmap *idmap,
const struct path *path,
- struct smb_ntsd *pntsd, int len);
+ struct smb_ntsd *pntsd, int len,
+ bool get_write);
int ksmbd_vfs_get_sd_xattr(struct ksmbd_conn *conn,
struct mnt_idmap *idmap,
struct dentry *dentry,
struct smb_ntsd **pntsd);
int ksmbd_vfs_set_dos_attrib_xattr(struct mnt_idmap *idmap,
const struct path *path,
- struct xattr_dos_attrib *da);
+ struct xattr_dos_attrib *da,
+ bool get_write);
int ksmbd_vfs_get_dos_attrib_xattr(struct mnt_idmap *idmap,
struct dentry *dentry,
struct xattr_dos_attrib *da);
diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c
index c91eac6514dd..ddf233994ddb 100644
--- a/fs/smb/server/vfs_cache.c
+++ b/fs/smb/server/vfs_cache.c
@@ -66,14 +66,14 @@ static unsigned long inode_hash(struct super_block *sb, unsigned long hashval)
return tmp & inode_hash_mask;
}
-static struct ksmbd_inode *__ksmbd_inode_lookup(struct inode *inode)
+static struct ksmbd_inode *__ksmbd_inode_lookup(struct dentry *de)
{
struct hlist_head *head = inode_hashtable +
- inode_hash(inode->i_sb, inode->i_ino);
+ inode_hash(d_inode(de)->i_sb, (unsigned long)de);
struct ksmbd_inode *ci = NULL, *ret_ci = NULL;
hlist_for_each_entry(ci, head, m_hash) {
- if (ci->m_inode == inode) {
+ if (ci->m_de == de) {
if (atomic_inc_not_zero(&ci->m_count))
ret_ci = ci;
break;
@@ -84,26 +84,16 @@ static struct ksmbd_inode *__ksmbd_inode_lookup(struct inode *inode)
static struct ksmbd_inode *ksmbd_inode_lookup(struct ksmbd_file *fp)
{
- return __ksmbd_inode_lookup(file_inode(fp->filp));
+ return __ksmbd_inode_lookup(fp->filp->f_path.dentry);
}
-static struct ksmbd_inode *ksmbd_inode_lookup_by_vfsinode(struct inode *inode)
-{
- struct ksmbd_inode *ci;
-
- read_lock(&inode_hash_lock);
- ci = __ksmbd_inode_lookup(inode);
- read_unlock(&inode_hash_lock);
- return ci;
-}
-
-int ksmbd_query_inode_status(struct inode *inode)
+int ksmbd_query_inode_status(struct dentry *dentry)
{
struct ksmbd_inode *ci;
int ret = KSMBD_INODE_STATUS_UNKNOWN;
read_lock(&inode_hash_lock);
- ci = __ksmbd_inode_lookup(inode);
+ ci = __ksmbd_inode_lookup(dentry);
if (ci) {
ret = KSMBD_INODE_STATUS_OK;
if (ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS))
@@ -143,7 +133,7 @@ void ksmbd_fd_set_delete_on_close(struct ksmbd_file *fp,
static void ksmbd_inode_hash(struct ksmbd_inode *ci)
{
struct hlist_head *b = inode_hashtable +
- inode_hash(ci->m_inode->i_sb, ci->m_inode->i_ino);
+ inode_hash(d_inode(ci->m_de)->i_sb, (unsigned long)ci->m_de);
hlist_add_head(&ci->m_hash, b);
}
@@ -157,7 +147,6 @@ static void ksmbd_inode_unhash(struct ksmbd_inode *ci)
static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp)
{
- ci->m_inode = file_inode(fp->filp);
atomic_set(&ci->m_count, 1);
atomic_set(&ci->op_count, 0);
atomic_set(&ci->sop_count, 0);
@@ -166,6 +155,7 @@ static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp)
INIT_LIST_HEAD(&ci->m_fp_list);
INIT_LIST_HEAD(&ci->m_op_list);
rwlock_init(&ci->m_lock);
+ ci->m_de = fp->filp->f_path.dentry;
return 0;
}
@@ -488,12 +478,15 @@ struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid)
return fp;
}
-struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode)
+struct ksmbd_file *ksmbd_lookup_fd_inode(struct dentry *dentry)
{
struct ksmbd_file *lfp;
struct ksmbd_inode *ci;
+ struct inode *inode = d_inode(dentry);
- ci = ksmbd_inode_lookup_by_vfsinode(inode);
+ read_lock(&inode_hash_lock);
+ ci = __ksmbd_inode_lookup(dentry);
+ read_unlock(&inode_hash_lock);
if (!ci)
return NULL;
diff --git a/fs/smb/server/vfs_cache.h b/fs/smb/server/vfs_cache.h
index 03d0bf941216..8325cf4527c4 100644
--- a/fs/smb/server/vfs_cache.h
+++ b/fs/smb/server/vfs_cache.h
@@ -51,7 +51,7 @@ struct ksmbd_inode {
atomic_t op_count;
/* opinfo count for streams */
atomic_t sop_count;
- struct inode *m_inode;
+ struct dentry *m_de;
unsigned int m_flags;
struct hlist_node m_hash;
struct list_head m_fp_list;
@@ -140,7 +140,7 @@ struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work, u64 id,
void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp);
struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id);
struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid);
-struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode);
+struct ksmbd_file *ksmbd_lookup_fd_inode(struct dentry *dentry);
unsigned int ksmbd_open_durable_fd(struct ksmbd_file *fp);
struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp);
void ksmbd_close_tree_conn_fds(struct ksmbd_work *work);
@@ -164,7 +164,7 @@ enum KSMBD_INODE_STATUS {
KSMBD_INODE_STATUS_PENDING_DELETE,
};
-int ksmbd_query_inode_status(struct inode *inode);
+int ksmbd_query_inode_status(struct dentry *dentry);
bool ksmbd_inode_pending_delete(struct ksmbd_file *fp);
void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp);
void ksmbd_clear_inode_pending_delete(struct ksmbd_file *fp);
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 581ce9519339..2dc730800f44 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -321,7 +321,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
TRACE("Block @ 0x%llx, %scompressed size %d\n", index - 2,
compressed ? "" : "un", length);
}
- if (length < 0 || length > output->length ||
+ if (length <= 0 || length > output->length ||
(index + length) > msblk->bytes_used) {
res = -EIO;
goto out;
diff --git a/include/drm/bridge/aux-bridge.h b/include/drm/bridge/aux-bridge.h
new file mode 100644
index 000000000000..66249ff0858e
--- /dev/null
+++ b/include/drm/bridge/aux-bridge.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Linaro Ltd.
+ *
+ * Author: Dmitry Baryshkov <[email protected]>
+ */
+#ifndef DRM_AUX_BRIDGE_H
+#define DRM_AUX_BRIDGE_H
+
+#include <drm/drm_connector.h>
+
+#if IS_ENABLED(CONFIG_DRM_AUX_BRIDGE)
+int drm_aux_bridge_register(struct device *parent);
+#else
+static inline int drm_aux_bridge_register(struct device *parent)
+{
+ return 0;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_AUX_HPD_BRIDGE)
+struct device *drm_dp_hpd_bridge_register(struct device *parent,
+ struct device_node *np);
+void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status);
+#else
+static inline struct device *drm_dp_hpd_bridge_register(struct device *parent,
+ struct device_node *np)
+{
+ return NULL;
+}
+
+static inline void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status)
+{
+}
+#endif
+
+#endif
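A hedged sketch of how a DisplayPort altmode/mux driver might consume this new header — the mymux names are invented, and the ERR_PTR error convention of drm_dp_hpd_bridge_register() is an assumption, not something the header itself guarantees:

    #include <linux/platform_device.h>
    #include <drm/bridge/aux-bridge.h>

    static int mymux_probe(struct platform_device *pdev)
    {
            struct device *bridge_dev;

            bridge_dev = drm_dp_hpd_bridge_register(&pdev->dev,
                                                    pdev->dev.of_node);
            if (IS_ERR(bridge_dev))                 /* assumed convention */
                    return PTR_ERR(bridge_dev);

            platform_set_drvdata(pdev, bridge_dev);
            return 0;
    }

    /* later, on cable plug/unplug events: */
    static void mymux_hpd(struct platform_device *pdev, bool attached)
    {
            drm_aux_hpd_bridge_notify(platform_get_drvdata(pdev),
                                      attached ? connector_status_connected
                                               : connector_status_disconnected);
    }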
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index 83d2039c018b..3731828825bd 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -651,6 +651,9 @@
# define DP_LINK_QUAL_PATTERN_PRSBS31 0x38
# define DP_LINK_QUAL_PATTERN_CUSTOM 0x40
# define DP_LINK_QUAL_PATTERN_SQUARE 0x48
+# define DP_LINK_QUAL_PATTERN_SQUARE_PRESHOOT_DISABLED 0x49
+# define DP_LINK_QUAL_PATTERN_SQUARE_DEEMPHASIS_DISABLED 0x4a
+# define DP_LINK_QUAL_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED 0x4b
#define DP_TRAINING_LANE0_1_SET2 0x10f
#define DP_TRAINING_LANE2_3_SET2 0x110
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 536a0b0091c3..006b5c977ad7 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -97,6 +97,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
int drm_atomic_helper_prepare_planes(struct drm_device *dev,
struct drm_atomic_state *state);
+void drm_atomic_helper_unprepare_planes(struct drm_device *dev,
+ struct drm_atomic_state *state);
#define DRM_PLANE_COMMIT_ACTIVE_ONLY BIT(0)
#define DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET BIT(1)
diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h
index ba248ca8866f..50131383ed81 100644
--- a/include/drm/drm_auth.h
+++ b/include/drm/drm_auth.h
@@ -33,24 +33,6 @@
#include <linux/wait.h>
struct drm_file;
-struct drm_hw_lock;
-
-/*
- * Legacy DRI1 locking data structure. Only here instead of in drm_legacy.h for
- * include ordering reasons.
- *
- * DO NOT USE.
- */
-struct drm_lock_data {
- struct drm_hw_lock *hw_lock;
- struct drm_file *file_priv;
- wait_queue_head_t lock_queue;
- unsigned long lock_time;
- spinlock_t spinlock;
- uint32_t kernel_waiters;
- uint32_t user_waiters;
- int idle_has_lock;
-};
/**
* struct drm_master - drm master structure
@@ -145,10 +127,6 @@ struct drm_master {
* Protected by &drm_device.mode_config's &drm_mode_config.idr_mutex.
*/
struct idr lessee_idr;
- /* private: */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- struct drm_lock_data lock;
-#endif
};
struct drm_master *drm_master_get(struct drm_master *master);
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index cfb7dcdb66c4..e39da5807ba7 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -194,7 +194,7 @@ struct drm_bridge_funcs {
* or &drm_encoder_helper_funcs.dpms hook.
*
* The bridge must assume that the display pipe (i.e. clocks and timing
- * singals) feeding it is no longer running when this callback is
+ * signals) feeding it is no longer running when this callback is
* called.
*
* The @post_disable callback is optional.
@@ -950,6 +950,4 @@ static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm,
}
#endif
-void drm_bridge_debugfs_init(struct drm_device *dev);
-
#endif
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index c490977ee250..63767cf24371 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -6,7 +6,6 @@
#include <linux/mutex.h>
#include <linux/idr.h>
-#include <drm/drm_legacy.h>
#include <drm/drm_mode_config.h>
struct drm_driver;
@@ -153,8 +152,8 @@ struct drm_device {
*
* Lock for others (not &drm_minor.master and &drm_file.is_master)
*
- * WARNING:
- * Only drivers annotated with DRIVER_LEGACY should be using this.
+ * TODO: This lock used to be the BKL of the DRM subsystem. Move the
+ * lock into i915, which is the only remaining user.
*/
struct mutex struct_mutex;
@@ -317,72 +316,6 @@ struct drm_device {
* Root directory for debugfs files.
*/
struct dentry *debugfs_root;
-
- /* Everything below here is for legacy driver, never use! */
- /* private: */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- /* List of devices per driver for stealth attach cleanup */
- struct list_head legacy_dev_list;
-
-#ifdef __alpha__
- /** @hose: PCI hose, only used on ALPHA platforms. */
- struct pci_controller *hose;
-#endif
-
- /* AGP data */
- struct drm_agp_head *agp;
-
- /* Context handle management - linked list of context handles */
- struct list_head ctxlist;
-
- /* Context handle management - mutex for &ctxlist */
- struct mutex ctxlist_mutex;
-
- /* Context handle management */
- struct idr ctx_idr;
-
- /* Memory management - linked list of regions */
- struct list_head maplist;
-
- /* Memory management - user token hash table for maps */
- struct drm_open_hash map_hash;
-
- /* Context handle management - list of vmas (for debugging) */
- struct list_head vmalist;
-
- /* Optional pointer for DMA support */
- struct drm_device_dma *dma;
-
- /* Context swapping flag */
- __volatile__ long context_flag;
-
- /* Last current context */
- int last_context;
-
- /* Lock for &buf_use and a few other things. */
- spinlock_t buf_lock;
-
- /* Usage counter for buffers in use -- cannot alloc */
- int buf_use;
-
- /* Buffer allocation in progress */
- atomic_t buf_alloc;
-
- struct {
- int context;
- struct drm_hw_lock *lock;
- } sigdata;
-
- struct drm_local_map *agp_buffer_map;
- unsigned int agp_buffer_token;
-
- /* Scatter gather memory */
- struct drm_sg_mem *sg;
-
- /* IRQs */
- bool irq_enabled;
- int irq;
-#endif
};
#endif
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index e2640dc64e08..8878260d7529 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -110,6 +110,15 @@ enum drm_driver_feature {
* Driver supports user defined GPU VA bindings for GEM objects.
*/
DRIVER_GEM_GPUVA = BIT(8),
+ /**
+ * @DRIVER_CURSOR_HOTSPOT:
+ *
+ * Driver supports and requires cursor hotspot information in the
+ * cursor plane (e.g. cursor plane has to actually track the mouse
+	 * cursor and clients are required to set the hotspot in order for
+ * the cursor planes to work correctly).
+ */
+ DRIVER_CURSOR_HOTSPOT = BIT(9),
/* IMPORTANT: Below are all the legacy flags, add new ones above. */
@@ -433,25 +442,6 @@ struct drm_driver {
* some examples.
*/
const struct file_operations *fops;
-
-#ifdef CONFIG_DRM_LEGACY
- /* Everything below here is for legacy driver, never use! */
- /* private: */
-
- int (*firstopen) (struct drm_device *);
- void (*preclose) (struct drm_device *, struct drm_file *file_priv);
- int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
- int (*dma_quiescent) (struct drm_device *);
- int (*context_dtor) (struct drm_device *dev, int context);
- irqreturn_t (*irq_handler)(int irq, void *arg);
- void (*irq_preinstall)(struct drm_device *dev);
- int (*irq_postinstall)(struct drm_device *dev);
- void (*irq_uninstall)(struct drm_device *dev);
- u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe);
- int (*enable_vblank)(struct drm_device *dev, unsigned int pipe);
- void (*disable_vblank)(struct drm_device *dev, unsigned int pipe);
- int dev_priv_size;
-#endif
};
void *__devm_drm_dev_alloc(struct device *parent,
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index 3a09682af685..977a9381c8ba 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -60,7 +60,7 @@ struct drm_encoder_funcs {
* @late_register:
*
* This optional hook can be used to register additional userspace
- * interfaces attached to the encoder like debugfs interfaces.
+ * interfaces attached to the encoder.
* It is called late in the driver load sequence from drm_dev_register().
* Everything added from this callback should be unregistered in
* the early_unregister callback.
@@ -81,6 +81,13 @@ struct drm_encoder_funcs {
* before data structures are torndown.
*/
void (*early_unregister)(struct drm_encoder *encoder);
+
+ /**
+ * @debugfs_init:
+ *
+ * Allows encoders to create encoder-specific debugfs files.
+ */
+ void (*debugfs_init)(struct drm_encoder *encoder, struct dentry *root);
};
/**
@@ -184,6 +191,13 @@ struct drm_encoder {
const struct drm_encoder_funcs *funcs;
const struct drm_encoder_helper_funcs *helper_private;
+
+ /**
+ * @debugfs_entry:
+ *
+	 * Debugfs directory for this encoder.
+ */
+ struct dentry *debugfs_entry;
};
#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
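A minimal sketch of the new hook in use; the file name, fops, and funcs struct are illustrative assumptions.

static const struct file_operations example_fops; /* assumed defined elsewhere */

static void example_encoder_debugfs_init(struct drm_encoder *encoder,
					 struct dentry *root)
{
	/* root is the per-encoder debugfs directory created by the core. */
	debugfs_create_file("state", 0444, root, encoder, &example_fops);
}

static const struct drm_encoder_funcs example_encoder_funcs = {
	.destroy = drm_encoder_cleanup,
	.debugfs_init = example_encoder_debugfs_init,
};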
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index e1b5b4282f75..ab230d3af138 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -227,6 +227,18 @@ struct drm_file {
bool is_master;
/**
+ * @supports_virtualized_cursor_plane:
+ *
+ * This client is capable of handling the cursor plane with the
+ * restrictions imposed on it by the virtualized drivers.
+ *
+	 * This implies that the cursor plane has to behave like a cursor,
+	 * i.e. track cursor movement. It also requires the client to set
+	 * the hotspot properties on the cursor plane.
+ */
+ bool supports_virtualized_cursor_plane;
+
+ /**
* @master:
*
* Master this node is currently associated with. Protected by struct
@@ -374,11 +386,6 @@ struct drm_file {
* Per-file buffer caches used by the PRIME buffer sharing code.
*/
struct drm_prime_file_private prime;
-
- /* private: */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- unsigned long lock_count; /* DRI1 legacy lock count */
-#endif
};
/**
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index 80ece7b6dd9b..668077009fce 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -189,18 +189,6 @@ struct drm_framebuffer {
*/
int flags;
/**
- * @hot_x: X coordinate of the cursor hotspot. Used by the legacy cursor
- * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR
- * universal plane.
- */
- int hot_x;
- /**
- * @hot_y: Y coordinate of the cursor hotspot. Used by the legacy cursor
- * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR
- * universal plane.
- */
- int hot_y;
- /**
* @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock.
*/
struct list_head filp_head;
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index f94fec9a8517..f893d9c9b641 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
#ifndef __DRM_GPUVM_H__
#define __DRM_GPUVM_H__
@@ -544,26 +544,9 @@ struct drm_gpuvm_exec {
} extra;
};
-/**
- * drm_gpuvm_prepare_vm() - prepare the GPUVMs common dma-resv
- * @gpuvm: the &drm_gpuvm
- * @exec: the &drm_exec context
- * @num_fences: the amount of &dma_fences to reserve
- *
- * Calls drm_exec_prepare_obj() for the GPUVMs dummy &drm_gem_object.
- *
- * Using this function directly, it is the drivers responsibility to call
- * drm_exec_init() and drm_exec_fini() accordingly.
- *
- * Returns: 0 on success, negative error code on failure.
- */
-static inline int
-drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm,
- struct drm_exec *exec,
- unsigned int num_fences)
-{
- return drm_exec_prepare_obj(exec, gpuvm->r_obj, num_fences);
-}
+int drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ unsigned int num_fences);
int drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
struct drm_exec *exec,
@@ -738,7 +721,7 @@ drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
return vm_bo;
}
-void drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo);
+bool drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo);
struct drm_gpuvm_bo *
drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
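A minimal sketch of the now out-of-line drm_gpuvm_prepare_vm() inside a drm_exec locking loop. This assumes the three-argument drm_exec_init() signature current around this series (older kernels take two arguments), and the surrounding submit logic is hypothetical.

static int example_lock_vm(struct drm_gpuvm *gpuvm)
{
	struct drm_exec exec;
	int ret = 0;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		/* Reserve one fence slot on the VM's common dma-resv. */
		ret = drm_gpuvm_prepare_vm(gpuvm, &exec, 1);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;
	}

	/* ... submit work and add fences while the resv is held ... */

	drm_exec_fini(&exec);
	return ret;
}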
diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h
index 6ed61c371f6c..171760b6c4a1 100644
--- a/include/drm/drm_ioctl.h
+++ b/include/drm/drm_ioctl.h
@@ -110,17 +110,6 @@ enum drm_ioctl_flags {
*/
DRM_ROOT_ONLY = BIT(2),
/**
- * @DRM_UNLOCKED:
- *
- * Whether &drm_ioctl_desc.func should be called with the DRM BKL held
- * or not. Enforced as the default for all modern drivers, hence there
- * should never be a need to set this flag.
- *
- * Do not use anywhere else than for the VBLANK_WAIT IOCTL, which is the
- * only legacy IOCTL which needs this.
- */
- DRM_UNLOCKED = BIT(4),
- /**
* @DRM_RENDER_ALLOW:
*
* This is used for all ioctl needed for rendering only, for drivers
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
deleted file mode 100644
index 0fc85418aad8..000000000000
--- a/include/drm/drm_legacy.h
+++ /dev/null
@@ -1,331 +0,0 @@
-#ifndef __DRM_DRM_LEGACY_H__
-#define __DRM_DRM_LEGACY_H__
-/*
- * Legacy driver interfaces for the Direct Rendering Manager
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009-2010, Code Aurora Forum.
- * All rights reserved.
- * Copyright © 2014 Intel Corporation
- * Daniel Vetter <[email protected]>
- *
- * Author: Rickard E. (Rik) Faith <[email protected]>
- * Author: Gareth Hughes <[email protected]>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/agp_backend.h>
-
-#include <drm/drm.h>
-#include <drm/drm_auth.h>
-
-struct drm_device;
-struct drm_driver;
-struct file;
-struct pci_driver;
-
-/*
- * Legacy Support for palateontologic DRM drivers
- *
- * If you add a new driver and it uses any of these functions or structures,
- * you're doing it terribly wrong.
- */
-
-/*
- * Hash-table Support
- */
-
-struct drm_hash_item {
- struct hlist_node head;
- unsigned long key;
-};
-
-struct drm_open_hash {
- struct hlist_head *table;
- u8 order;
-};
-
-/**
- * DMA buffer.
- */
-struct drm_buf {
- int idx; /**< Index into master buflist */
- int total; /**< Buffer size */
- int order; /**< log-base-2(total) */
- int used; /**< Amount of buffer in use (for DMA) */
- unsigned long offset; /**< Byte offset (used internally) */
- void *address; /**< Address of buffer */
- unsigned long bus_address; /**< Bus address of buffer */
- struct drm_buf *next; /**< Kernel-only: used for free list */
- __volatile__ int waiting; /**< On kernel DMA queue */
- __volatile__ int pending; /**< On hardware DMA queue */
- struct drm_file *file_priv; /**< Private of holding file descr */
- int context; /**< Kernel queue for this buffer */
- int while_locked; /**< Dispatch this buffer while locked */
- enum {
- DRM_LIST_NONE = 0,
- DRM_LIST_FREE = 1,
- DRM_LIST_WAIT = 2,
- DRM_LIST_PEND = 3,
- DRM_LIST_PRIO = 4,
- DRM_LIST_RECLAIM = 5
- } list; /**< Which list we're on */
-
- int dev_priv_size; /**< Size of buffer private storage */
- void *dev_private; /**< Per-buffer private storage */
-};
-
-typedef struct drm_dma_handle {
- dma_addr_t busaddr;
- void *vaddr;
- size_t size;
-} drm_dma_handle_t;
-
-/**
- * Buffer entry. There is one of this for each buffer size order.
- */
-struct drm_buf_entry {
- int buf_size; /**< size */
- int buf_count; /**< number of buffers */
- struct drm_buf *buflist; /**< buffer list */
- int seg_count;
- int page_order;
- struct drm_dma_handle **seglist;
-
- int low_mark; /**< Low water mark */
- int high_mark; /**< High water mark */
-};
-
-/**
- * DMA data.
- */
-struct drm_device_dma {
-
- struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
- int buf_count; /**< total number of buffers */
- struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */
- int seg_count;
- int page_count; /**< number of pages */
- unsigned long *pagelist; /**< page list */
- unsigned long byte_count;
- enum {
- _DRM_DMA_USE_AGP = 0x01,
- _DRM_DMA_USE_SG = 0x02,
- _DRM_DMA_USE_FB = 0x04,
- _DRM_DMA_USE_PCI_RO = 0x08
- } flags;
-
-};
-
-/**
- * Scatter-gather memory.
- */
-struct drm_sg_mem {
- unsigned long handle;
- void *virtual;
- int pages;
- struct page **pagelist;
- dma_addr_t *busaddr;
-};
-
-/**
- * Kernel side of a mapping
- */
-struct drm_local_map {
- dma_addr_t offset; /**< Requested physical address (0 for SAREA)*/
- unsigned long size; /**< Requested physical size (bytes) */
- enum drm_map_type type; /**< Type of memory to map */
- enum drm_map_flags flags; /**< Flags */
- void *handle; /**< User-space: "Handle" to pass to mmap() */
- /**< Kernel-space: kernel-virtual address */
- int mtrr; /**< MTRR slot used */
-};
-
-typedef struct drm_local_map drm_local_map_t;
-
-/**
- * Mappings list
- */
-struct drm_map_list {
- struct list_head head; /**< list head */
- struct drm_hash_item hash;
- struct drm_local_map *map; /**< mapping */
- uint64_t user_token;
- struct drm_master *master;
-};
-
-int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
- unsigned int size, enum drm_map_type type,
- enum drm_map_flags flags, struct drm_local_map **map_p);
-struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, unsigned int token);
-void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
-int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
-struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
-int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma);
-
-int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req);
-int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req);
-
-/**
- * Test that the hardware lock is held by the caller, returning otherwise.
- *
- * \param dev DRM device.
- * \param filp file pointer of the caller.
- */
-#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \
-do { \
- if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \
- _file_priv->master->lock.file_priv != _file_priv) { \
- DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
- __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
- _file_priv->master->lock.file_priv, _file_priv); \
- return -EINVAL; \
- } \
-} while (0)
-
-void drm_legacy_idlelock_take(struct drm_lock_data *lock);
-void drm_legacy_idlelock_release(struct drm_lock_data *lock);
-
-/* drm_irq.c */
-int drm_legacy_irq_uninstall(struct drm_device *dev);
-
-/* drm_pci.c */
-
-#ifdef CONFIG_PCI
-
-int drm_legacy_pci_init(const struct drm_driver *driver,
- struct pci_driver *pdriver);
-void drm_legacy_pci_exit(const struct drm_driver *driver,
- struct pci_driver *pdriver);
-
-#else
-
-static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev,
- size_t size, size_t align)
-{
- return NULL;
-}
-
-static inline void drm_pci_free(struct drm_device *dev,
- struct drm_dma_handle *dmah)
-{
-}
-
-static inline int drm_legacy_pci_init(const struct drm_driver *driver,
- struct pci_driver *pdriver)
-{
- return -EINVAL;
-}
-
-static inline void drm_legacy_pci_exit(const struct drm_driver *driver,
- struct pci_driver *pdriver)
-{
-}
-
-#endif
-
-/*
- * AGP Support
- */
-
-struct drm_agp_head {
- struct agp_kern_info agp_info;
- struct list_head memory;
- unsigned long mode;
- struct agp_bridge_data *bridge;
- int enabled;
- int acquired;
- unsigned long base;
- int agp_mtrr;
- int cant_use_aperture;
- unsigned long page_mask;
-};
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP)
-struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev);
-int drm_legacy_agp_acquire(struct drm_device *dev);
-int drm_legacy_agp_release(struct drm_device *dev);
-int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
-int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info);
-int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
-int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
-int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
-int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
-#else
-static inline struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev)
-{
- return NULL;
-}
-
-static inline int drm_legacy_agp_acquire(struct drm_device *dev)
-{
- return -ENODEV;
-}
-
-static inline int drm_legacy_agp_release(struct drm_device *dev)
-{
- return -ENODEV;
-}
-
-static inline int drm_legacy_agp_enable(struct drm_device *dev,
- struct drm_agp_mode mode)
-{
- return -ENODEV;
-}
-
-static inline int drm_legacy_agp_info(struct drm_device *dev,
- struct drm_agp_info *info)
-{
- return -ENODEV;
-}
-
-static inline int drm_legacy_agp_alloc(struct drm_device *dev,
- struct drm_agp_buffer *request)
-{
- return -ENODEV;
-}
-
-static inline int drm_legacy_agp_free(struct drm_device *dev,
- struct drm_agp_buffer *request)
-{
- return -ENODEV;
-}
-
-static inline int drm_legacy_agp_unbind(struct drm_device *dev,
- struct drm_agp_binding *request)
-{
- return -ENODEV;
-}
-
-static inline int drm_legacy_agp_bind(struct drm_device *dev,
- struct drm_agp_binding *request)
-{
- return -ENODEV;
-}
-#endif
-
-/* drm_memory.c */
-void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
-void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
-void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
-
-#endif /* __DRM_DRM_LEGACY_H__ */
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index c9df0407980c..c0aec0d4d664 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -168,6 +168,7 @@ struct mipi_dsi_device_info {
* struct mipi_dsi_device - DSI peripheral device
* @host: DSI host for this peripheral
* @dev: driver model device node for this peripheral
+ * @attached: the DSI device has been successfully attached
* @name: DSI peripheral chip type
* @channel: virtual channel assigned to the peripheral
* @format: pixel format for video mode
@@ -184,6 +185,7 @@ struct mipi_dsi_device_info {
struct mipi_dsi_device {
struct mipi_dsi_host *host;
struct device dev;
+ bool attached;
char name[DSI_DEV_NAME_SIZE];
unsigned int channel;
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index e3c3ac615909..a33cf7488737 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -1154,6 +1154,11 @@ struct drm_connector_helper_funcs {
* This operation is optional.
*
* This callback is used by the drm_kms_helper_poll_enable() helpers.
+ *
+ * This operation does not need to perform any hpd state tracking as
+ * the DRM core handles that maintenance and ensures the calls to enable
+ * and disable hpd are balanced.
+ *
*/
void (*enable_hpd)(struct drm_connector *connector);
@@ -1165,6 +1170,11 @@ struct drm_connector_helper_funcs {
* This operation is optional.
*
* This callback is used by the drm_kms_helper_poll_disable() helpers.
+ *
+ * This operation does not need to perform any hpd state tracking as
+ * the DRM core handles that maintenance and ensures the calls to enable
+ * and disable hpd are balanced.
+ *
*/
void (*disable_hpd)(struct drm_connector *connector);
};
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 79d62856defb..c6565a6f9324 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -116,6 +116,10 @@ struct drm_plane_state {
/** @src_h: height of visible portion of plane (in 16.16) */
uint32_t src_h, src_w;
+ /** @hotspot_x: x offset to mouse cursor hotspot */
+ /** @hotspot_y: y offset to mouse cursor hotspot */
+ int32_t hotspot_x, hotspot_y;
+
/**
* @alpha:
* Opacity of the plane with 0 as completely transparent and 0xffff as
@@ -191,6 +195,16 @@ struct drm_plane_state {
struct drm_property_blob *fb_damage_clips;
/**
+ * @ignore_damage_clips:
+ *
+	 * Set by drivers to indicate to the drm_atomic_helper_damage_iter_init()
+	 * helper that the @fb_damage_clips blob property should be ignored.
+ *
+ * See :ref:`damage_tracking_properties` for more information.
+ */
+ bool ignore_damage_clips;
+
+ /**
* @src:
*
* source coordinates of the plane (in 16.16).
@@ -748,6 +762,16 @@ struct drm_plane {
* scaling.
*/
struct drm_property *scaling_filter_property;
+
+ /**
+ * @hotspot_x_property: property to set mouse hotspot x offset.
+ */
+ struct drm_property *hotspot_x_property;
+
+ /**
+ * @hotspot_y_property: property to set mouse hotspot y offset.
+ */
+ struct drm_property *hotspot_y_property;
};
#define obj_to_plane(x) container_of(x, struct drm_plane, base)
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 3a574e8cd22f..75f9c4830564 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -26,7 +26,6 @@
#include <linux/types.h>
-struct drm_atomic_state;
struct drm_crtc;
struct drm_framebuffer;
struct drm_modeset_acquire_ctx;
@@ -42,7 +41,6 @@ int drm_plane_helper_update_primary(struct drm_plane *plane, struct drm_crtc *cr
int drm_plane_helper_disable_primary(struct drm_plane *plane,
struct drm_modeset_acquire_ctx *ctx);
void drm_plane_helper_destroy(struct drm_plane *plane);
-int drm_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state);
/**
* DRM_PLANE_NON_ATOMIC_FUNCS - Default plane functions for non-atomic drivers
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index a7abf9f3e697..2a1d01e5b56b 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -60,12 +60,19 @@ enum dma_data_direction;
struct drm_device;
struct drm_gem_object;
+struct drm_file;
/* core prime functions */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
struct dma_buf_export_info *exp_info);
void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
+int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv, int prime_fd, uint32_t *handle);
+int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle, uint32_t flags,
+ int *prime_fd);
+
/* helper functions for exporting */
int drm_gem_map_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach);
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 9a50348bd5c0..5acc64954a88 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -63,10 +63,10 @@ struct drm_file;
* to an array, and as such should start at 0.
*/
enum drm_sched_priority {
- DRM_SCHED_PRIORITY_MIN,
- DRM_SCHED_PRIORITY_NORMAL,
- DRM_SCHED_PRIORITY_HIGH,
DRM_SCHED_PRIORITY_KERNEL,
+ DRM_SCHED_PRIORITY_HIGH,
+ DRM_SCHED_PRIORITY_NORMAL,
+ DRM_SCHED_PRIORITY_LOW,
DRM_SCHED_PRIORITY_COUNT
};
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 1c9ea6ab3eb9..fcf1849aa47c 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -738,7 +738,8 @@
INTEL_DG2_G12_IDS(info)
#define INTEL_ATS_M150_IDS(info) \
- INTEL_VGA_DEVICE(0x56C0, info)
+ INTEL_VGA_DEVICE(0x56C0, info), \
+ INTEL_VGA_DEVICE(0x56C2, info)
#define INTEL_ATS_M75_IDS(info) \
INTEL_VGA_DEVICE(0x56C1, info)
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 54189e0e5f41..4db54e928b36 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -15,7 +15,6 @@
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/uuid.h>
-#include <linux/fw_table.h>
struct irq_domain;
struct irq_domain_ops;
@@ -25,22 +24,13 @@ struct irq_domain_ops;
#endif
#include <acpi/acpi.h>
-#ifdef CONFIG_ACPI_TABLE_LIB
-#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI)
-#define __init_or_acpilib
-#define __initdata_or_acpilib
-#else
-#define EXPORT_SYMBOL_ACPI_LIB(x)
-#define __init_or_acpilib __init
-#define __initdata_or_acpilib __initdata
-#endif
-
#ifdef CONFIG_ACPI
#include <linux/list.h>
#include <linux/dynamic_debug.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/fw_table.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -48,6 +38,16 @@ struct irq_domain_ops;
#include <acpi/acpi_io.h>
#include <asm/acpi.h>
+#ifdef CONFIG_ACPI_TABLE_LIB
+#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI)
+#define __init_or_acpilib
+#define __initdata_or_acpilib
+#else
+#define EXPORT_SYMBOL_ACPI_LIB(x)
+#define __init_or_acpilib __init
+#define __initdata_or_acpilib __initdata
+#endif
+
static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
{
return adev ? adev->handle : NULL;
diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h
index 446394f84606..6ad02ad9c7b4 100644
--- a/include/linux/amd-pstate.h
+++ b/include/linux/amd-pstate.h
@@ -70,6 +70,10 @@ struct amd_cpudata {
u32 nominal_perf;
u32 lowest_nonlinear_perf;
u32 lowest_perf;
+ u32 min_limit_perf;
+ u32 max_limit_perf;
+ u32 min_limit_freq;
+ u32 max_limit_freq;
u32 max_freq;
u32 min_freq;
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
index 1abedb5b2e48..3d0fde57ba90 100644
--- a/include/linux/arm_ffa.h
+++ b/include/linux/arm_ffa.h
@@ -209,6 +209,8 @@ bool ffa_device_is_valid(struct ffa_device *ffa_dev) { return false; }
#define module_ffa_driver(__ffa_driver) \
module_driver(__ffa_driver, ffa_register, ffa_unregister)
+extern struct bus_type ffa_bus_type;
+
/* FFA transport related */
struct ffa_partition_info {
u16 id;
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index d5c5e59ddbd2..b29ebd53417d 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -49,9 +49,10 @@ struct block_device {
bool bd_write_holder;
bool bd_has_submit_bio;
dev_t bd_dev;
+ struct inode *bd_inode; /* will die */
+
atomic_t bd_openers;
spinlock_t bd_size_lock; /* for bd_inode->i_size updates */
- struct inode * bd_inode; /* will die */
void * bd_claiming;
void * bd_holder;
const struct blk_holder_ops *bd_holder_ops;
@@ -69,6 +70,7 @@ struct block_device {
#ifdef CONFIG_FAIL_MAKE_REQUEST
bool bd_make_it_fail;
#endif
+ bool bd_ro_warned;
/*
* keep this out-of-line as it's both big and not needed in the fast
* path
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 6762dac3ef76..cff5bb08820e 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -3175,6 +3175,9 @@ enum bpf_text_poke_type {
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
void *addr1, void *addr2);
+void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
+ struct bpf_prog *new, struct bpf_prog *old);
+
void *bpf_arch_text_copy(void *dst, void *src, size_t len);
int bpf_arch_text_invalidate(void *dst, size_t len);
diff --git a/include/linux/closure.h b/include/linux/closure.h
index de7bb47d8a46..c554c6a08768 100644
--- a/include/linux/closure.h
+++ b/include/linux/closure.h
@@ -104,7 +104,7 @@
struct closure;
struct closure_syncer;
-typedef void (closure_fn) (struct closure *);
+typedef void (closure_fn) (struct work_struct *);
extern struct dentry *bcache_debug;
struct closure_waitlist {
@@ -254,7 +254,7 @@ static inline void closure_queue(struct closure *cl)
INIT_WORK(&cl->work, cl->work.func);
BUG_ON(!queue_work(wq, &cl->work));
} else
- cl->fn(cl);
+ cl->fn(&cl->work);
}
/**
@@ -309,6 +309,11 @@ static inline void closure_wake_up(struct closure_waitlist *list)
__closure_wake_up(list);
}
+#define CLOSURE_CALLBACK(name) void name(struct work_struct *ws)
+#define closure_type(name, type, member) \
+ struct closure *cl = container_of(ws, struct closure, work); \
+ type *name = container_of(cl, type, member)
+
/**
* continue_at - jump to another function with barrier
*
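A minimal sketch of the new macro pair: CLOSURE_CALLBACK() gives the callback the work_struct-based prototype that closure_queue() requires, and closure_type() recovers the caller's own type. struct example_op is hypothetical.

struct example_op {
	struct closure cl;
	int status;
};

CLOSURE_CALLBACK(example_read_done)
{
	/* Recover the containing object from the work_struct argument. */
	closure_type(op, struct example_op, cl);

	op->status = 0;
}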
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index ea2d919fd9c7..c9c65b132c0f 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -171,6 +171,25 @@ ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf,
ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos);
+/**
+ * struct debugfs_cancellation - cancellation data
+ * @list: internal, for keeping track
+ * @cancel: callback to call
+ * @cancel_data: extra data for the callback to call
+ */
+struct debugfs_cancellation {
+ struct list_head list;
+ void (*cancel)(struct dentry *, void *);
+ void *cancel_data;
+};
+
+void __acquires(cancellation)
+debugfs_enter_cancellation(struct file *file,
+ struct debugfs_cancellation *cancellation);
+void __releases(cancellation)
+debugfs_leave_cancellation(struct file *file,
+ struct debugfs_cancellation *cancellation);
+
#else
#include <linux/err.h>
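A minimal sketch of a cancellable blocking read; example_cancel() and example_blocking_read() are hypothetical.

static void example_cancel(struct dentry *dentry, void *data)
{
	/* Wake up or abort whatever example_blocking_read() waits on. */
}

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct debugfs_cancellation c = {
		.cancel = example_cancel,
		.cancel_data = file->private_data,
	};
	ssize_t ret;

	debugfs_enter_cancellation(file, &c);
	ret = example_blocking_read(file, buf, count, ppos); /* hypothetical */
	debugfs_leave_cancellation(file, &c);

	return ret;
}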
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 3f31baa3293f..8ff4add71f88 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -343,16 +343,19 @@ struct dma_buf {
/**
* @exp_name:
*
- * Name of the exporter; useful for debugging. See the
- * DMA_BUF_SET_NAME IOCTL.
+	 * Name of the exporter; useful for debugging. Must not be NULL.
*/
const char *exp_name;
/**
* @name:
*
- * Userspace-provided name; useful for accounting and debugging,
- * protected by dma_resv_lock() on @resv and @name_lock for read access.
+	 * Userspace-provided name. Default value is NULL. If not NULL,
+	 * the length cannot exceed DMA_BUF_NAME_LEN, including the NUL
+	 * terminator. Useful for accounting and debugging. Read/write
+	 * accesses are protected by @name_lock.
+ *
+ * See the IOCTLs DMA_BUF_SET_NAME or DMA_BUF_SET_NAME_A/B
*/
const char *name;
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index ebe78bd3d121..b3772edca2e6 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -499,6 +499,21 @@ static inline bool dma_fence_is_later(struct dma_fence *f1,
}
/**
+ * dma_fence_is_later_or_same - return true if f1 is later or same as f2
+ * @f1: the first fence from the same context
+ * @f2: the second fence from the same context
+ *
+ * Returns true if f1 is chronologically later than f2 or the same fence. Both
+ * fences must be from the same context, since a seqno is not re-used across
+ * contexts.
+ */
+static inline bool dma_fence_is_later_or_same(struct dma_fence *f1,
+ struct dma_fence *f2)
+{
+ return f1 == f2 || dma_fence_is_later(f1, f2);
+}
+
+/**
* dma_fence_later - return the chronologically later fence
* @f1: the first fence from the same context
* @f2: the second fence from the same context
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 94e2c44c6569..05dc9624897d 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -536,6 +536,7 @@ extern ssize_t fb_io_read(struct fb_info *info, char __user *buf,
size_t count, loff_t *ppos);
extern ssize_t fb_io_write(struct fb_info *info, const char __user *buf,
size_t count, loff_t *ppos);
+int fb_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
#define __FB_DEFAULT_IOMEM_OPS_RDWR \
.fb_read = fb_io_read, \
@@ -547,7 +548,7 @@ extern ssize_t fb_io_write(struct fb_info *info, const char __user *buf,
.fb_imageblit = cfb_imageblit
#define __FB_DEFAULT_IOMEM_OPS_MMAP \
- .fb_mmap = NULL /* default implementation */
+ .fb_mmap = fb_io_mmap
#define FB_DEFAULT_IOMEM_OPS \
__FB_DEFAULT_IOMEM_OPS_RDWR, \
@@ -848,7 +849,10 @@ static inline bool fb_modesetting_disabled(const char *drvname)
}
#endif
-/* Convenience logging macros */
+/*
+ * Convenience logging macros
+ */
+
#define fb_err(fb_info, fmt, ...) \
pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
#define fb_notice(info, fmt, ...) \
@@ -860,4 +864,12 @@ static inline bool fb_modesetting_disabled(const char *drvname)
#define fb_dbg(fb_info, fmt, ...) \
pr_debug("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+#define fb_warn_once(fb_info, fmt, ...) \
+ pr_warn_once("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+
+#define fb_WARN_ONCE(fb_info, condition, fmt, ...) \
+ WARN_ONCE(condition, "fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+#define fb_WARN_ON_ONCE(fb_info, x) \
+ fb_WARN_ONCE(fb_info, (x), "%s", "fb_WARN_ON_ONCE(" __stringify(x) ")")
+
#endif /* _LINUX_FB_H */
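A minimal sketch of the new one-shot helpers; the checks themselves are illustrative.

static void example_check_par(struct fb_info *info)
{
	fb_WARN_ON_ONCE(info, !info->screen_base);

	if (info->var.bits_per_pixel != 32)
		fb_warn_once(info, "unsupported depth %u\n",
			     info->var.bits_per_pixel);
}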
diff --git a/include/linux/fw_table.h b/include/linux/fw_table.h
index ff8fa58d5818..ca49947f0a77 100644
--- a/include/linux/fw_table.h
+++ b/include/linux/fw_table.h
@@ -25,9 +25,6 @@ struct acpi_subtable_proc {
int count;
};
-#include <linux/acpi.h>
-#include <acpi/acpi.h>
-
union acpi_subtable_headers {
struct acpi_subtable_header common;
struct acpi_hmat_structure hmat;
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 4cacc0e43b51..be20cff4ba73 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -454,7 +454,7 @@ static inline void memcpy_from_folio(char *to, struct folio *folio,
memcpy(to, from, chunk);
kunmap_local(from);
- from += chunk;
+ to += chunk;
offset += chunk;
len -= chunk;
} while (len > 0);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d3acecc5db4b..236ec7b63c54 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -1268,10 +1268,7 @@ static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
}
-static inline bool __vma_private_lock(struct vm_area_struct *vma)
-{
- return (!(vma->vm_flags & VM_MAYSHARE)) && vma->vm_private_data;
-}
+bool __vma_private_lock(struct vm_area_struct *vma);
/*
* Safe version of huge_pte_offset() to check the locks. See comments
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 958771bac9c0..c2ac9e9e7ee9 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -2830,12 +2830,14 @@ ieee80211_he_oper_size(const u8 *he_oper_ie)
static inline const struct ieee80211_he_6ghz_oper *
ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper)
{
- const u8 *ret = (const void *)&he_oper->optional;
+ const u8 *ret;
u32 he_oper_params;
if (!he_oper)
return NULL;
+ ret = (const void *)&he_oper->optional;
+
he_oper_params = le32_to_cpu(he_oper->he_oper_params);
if (!(he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO))
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index d3009d56af0b..805bb635cdf5 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -340,6 +340,9 @@ struct io_ring_ctx {
struct list_head io_buffers_cache;
+ /* deferred free list, protected by ->uring_lock */
+ struct hlist_head io_buf_list;
+
/* Keep this last, we don't need it for the fast path */
struct wait_queue_head poll_wq;
struct io_restriction restrictions;
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index ec289c1016f5..6291aa7b079b 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -845,6 +845,7 @@ static inline void dev_iommu_priv_set(struct device *dev, void *priv)
dev->iommu->priv = priv;
}
+extern struct mutex iommu_probe_device_lock;
int iommu_probe_device(struct device *dev);
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index ab1da3142b06..0ff44d6633e3 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -139,7 +139,7 @@ static inline bool kprobe_ftrace(struct kprobe *p)
*
*/
struct kretprobe_holder {
- struct kretprobe *rp;
+ struct kretprobe __rcu *rp;
struct objpool_head pool;
};
@@ -197,10 +197,8 @@ extern int arch_trampoline_kprobe(struct kprobe *p);
#ifdef CONFIG_KRETPROBE_ON_RETHOOK
static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
{
- RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
- "Kretprobe is accessed from instance under preemptive context");
-
- return (struct kretprobe *)READ_ONCE(ri->node.rethook->data);
+	/* rethook::data is an immutable field, so it can be accessed freely. */
+ return (struct kretprobe *)ri->node.rethook->data;
}
static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
{
@@ -245,10 +243,7 @@ unsigned long kretprobe_trampoline_handler(struct pt_regs *regs,
static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
{
- RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
- "Kretprobe is accessed from instance under preemptive context");
-
- return READ_ONCE(ri->rph->rp);
+ return rcu_dereference_check(ri->rph->rp, rcu_read_lock_any_held());
}
static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
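A minimal sketch of a return handler using the relaxed accessor; the handler body is illustrative.

static int example_ret_handler(struct kretprobe_instance *ri,
			       struct pt_regs *regs)
{
	/*
	 * Handlers run in an RCU-protected context, which satisfies the
	 * rcu_dereference_check() in get_kretprobe().
	 */
	struct kretprobe *rp = get_kretprobe(ri);

	if (rp)
		pr_debug("returned from probe at %s\n", rp->kp.symbol_name);
	return 0;
}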
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
index 63e630276499..ab1c7deff118 100644
--- a/include/linux/platform_data/x86/asus-wmi.h
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -114,6 +114,9 @@
/* Charging mode - 1=Barrel, 2=USB */
#define ASUS_WMI_DEVID_CHARGE_MODE 0x0012006C
+/* MCU powersave mode */
+#define ASUS_WMI_DEVID_MCU_POWERSAVE 0x001200E2
+
/* epu is connected? 1 == true */
#define ASUS_WMI_DEVID_EGPU_CONNECTED 0x00090018
/* egpu on/off */
diff --git a/include/linux/rethook.h b/include/linux/rethook.h
index ce69b2b7bc35..ba60962805f6 100644
--- a/include/linux/rethook.h
+++ b/include/linux/rethook.h
@@ -28,7 +28,12 @@ typedef void (*rethook_handler_t) (struct rethook_node *, void *, unsigned long,
*/
struct rethook {
void *data;
- rethook_handler_t handler;
+ /*
+ * To avoid sparse warnings, this uses a raw function pointer with
+	 * __rcu, instead of rethook_handler_t. But this must be the same as
+ * rethook_handler_t.
+ */
+ void (__rcu *handler) (struct rethook_node *, void *, unsigned long, struct pt_regs *);
struct objpool_head pool;
struct rcu_head rcu;
};
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index c1637515a8a4..c953b8c0d2f4 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -106,6 +106,7 @@ struct sk_psock {
struct mutex work_mutex;
struct sk_psock_work_state work_state;
struct delayed_work work;
+ struct sock *sk_pair;
struct rcu_work rwork;
};
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 0b4658a7eceb..dee5ad6e48c5 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -175,6 +175,7 @@ struct stmmac_fpe_cfg {
bool hs_enable; /* FPE handshake enable */
enum stmmac_fpe_state lp_fpe_state; /* Link Partner FPE state */
enum stmmac_fpe_state lo_fpe_state; /* Local station FPE state */
+ u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */
};
struct stmmac_safety_feature_cfg {
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 68f3d315d2e1..b646b574b060 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -169,7 +169,7 @@ struct tcp_request_sock {
#ifdef CONFIG_TCP_AO
u8 ao_keyid;
u8 ao_rcv_next;
- u8 maclen;
+ bool used_tcp_ao;
#endif
};
@@ -180,14 +180,10 @@ static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
static inline bool tcp_rsk_used_ao(const struct request_sock *req)
{
- /* The real length of MAC is saved in the request socket,
- * signing anything with zero-length makes no sense, so here is
- * a little hack..
- */
#ifndef CONFIG_TCP_AO
return false;
#else
- return tcp_rsk(req)->maclen != 0;
+ return tcp_rsk(req)->used_tcp_ao;
#endif
}
diff --git a/include/linux/units.h b/include/linux/units.h
index ff1bd6b5f5b3..45110daaf8d3 100644
--- a/include/linux/units.h
+++ b/include/linux/units.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_UNITS_H
#define _LINUX_UNITS_H
+#include <linux/bits.h>
#include <linux/math.h>
/* Metric prefixes in accordance with Système international (d'unités) */
diff --git a/include/linux/usb/r8152.h b/include/linux/usb/r8152.h
index 287e9d83fb8b..33a4c146dc19 100644
--- a/include/linux/usb/r8152.h
+++ b/include/linux/usb/r8152.h
@@ -30,6 +30,7 @@
#define VENDOR_ID_NVIDIA 0x0955
#define VENDOR_ID_TPLINK 0x2357
#define VENDOR_ID_DLINK 0x2001
+#define VENDOR_ID_ASUS 0x0b05
#if IS_REACHABLE(CONFIG_USB_RTL8152)
extern u8 rtl8152_get_version(struct usb_interface *intf);
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 454e9295970c..a65b2513f8cd 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -289,16 +289,12 @@ void vfio_combine_iova_ranges(struct rb_root_cached *root, u32 cur_nodes,
/*
* External user API
*/
-#if IS_ENABLED(CONFIG_VFIO_GROUP)
struct iommu_group *vfio_file_iommu_group(struct file *file);
+
+#if IS_ENABLED(CONFIG_VFIO_GROUP)
bool vfio_file_is_group(struct file *file);
bool vfio_file_has_dev(struct file *file, struct vfio_device *device);
#else
-static inline struct iommu_group *vfio_file_iommu_group(struct file *file)
-{
- return NULL;
-}
-
static inline bool vfio_file_is_group(struct file *file)
{
return false;
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 824c258143a3..49c4640027d8 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -75,6 +75,7 @@ struct unix_sock {
};
#define unix_sk(ptr) container_of_const(ptr, struct unix_sock, sk)
+#define unix_peer(sk) (unix_sk(sk)->peer)
#define peer_wait peer_wq.wait
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index b137a33a1b68..4ecfb06c413d 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -9299,4 +9299,50 @@ bool cfg80211_valid_disable_subchannel_bitmap(u16 *bitmap,
*/
void cfg80211_links_removed(struct net_device *dev, u16 link_mask);
+#ifdef CONFIG_CFG80211_DEBUGFS
+/**
+ * wiphy_locked_debugfs_read - do a locked read in debugfs
+ * @wiphy: the wiphy to use
+ * @file: the file being read
+ * @buf: the buffer to fill and then read from
+ * @bufsize: size of the buffer
+ * @userbuf: the user buffer to copy to
+ * @count: read count
+ * @ppos: read position
+ * @handler: the read handler to call (under wiphy lock)
+ * @data: additional data to pass to the read handler
+ */
+ssize_t wiphy_locked_debugfs_read(struct wiphy *wiphy, struct file *file,
+ char *buf, size_t bufsize,
+ char __user *userbuf, size_t count,
+ loff_t *ppos,
+ ssize_t (*handler)(struct wiphy *wiphy,
+ struct file *file,
+ char *buf,
+ size_t bufsize,
+ void *data),
+ void *data);
+
+/**
+ * wiphy_locked_debugfs_write - do a locked write in debugfs
+ * @wiphy: the wiphy to use
+ * @file: the file being written to
+ * @buf: the buffer to copy the user data to
+ * @bufsize: size of the buffer
+ * @userbuf: the user buffer to copy from
+ * @count: read count
+ * @handler: the write handler to call (under wiphy lock)
+ * @data: additional data to pass to the write handler
+ */
+ssize_t wiphy_locked_debugfs_write(struct wiphy *wiphy, struct file *file,
+ char *buf, size_t bufsize,
+ const char __user *userbuf, size_t count,
+ ssize_t (*handler)(struct wiphy *wiphy,
+ struct file *file,
+ char *buf,
+ size_t count,
+ void *data),
+ void *data);
+#endif
+
#endif /* __NET_CFG80211_H */
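A minimal sketch of the read side; struct example_priv and the file wiring are hypothetical.

struct example_priv {
	struct wiphy *wiphy;
	int state;
};

static ssize_t example_handler(struct wiphy *wiphy, struct file *file,
			       char *buf, size_t bufsize, void *data)
{
	/* Runs under the wiphy lock; fill buf and return the length. */
	return scnprintf(buf, bufsize, "state: %d\n", *(int *)data);
}

static ssize_t example_read(struct file *file, char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	struct example_priv *priv = file->private_data;
	char buf[64];

	return wiphy_locked_debugfs_read(priv->wiphy, file, buf, sizeof(buf),
					 userbuf, count, ppos,
					 example_handler, &priv->state);
}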
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index e18a4c0d69ee..c53244f20437 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -12,10 +12,12 @@
* struct genl_multicast_group - generic netlink multicast group
* @name: name of the multicast group, names are per-family
* @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM)
+ * @cap_sys_admin: whether %CAP_SYS_ADMIN is required for binding
*/
struct genl_multicast_group {
char name[GENL_NAMSIZ];
u8 flags;
+ u8 cap_sys_admin:1;
};
struct genl_split_ops;
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 07022bb0d44d..0d28172193fa 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -162,7 +162,7 @@ struct neighbour {
struct rcu_head rcu;
struct net_device *dev;
netdevice_tracker dev_tracker;
- u8 primary_key[0];
+ u8 primary_key[];
} __randomize_layout;
struct neigh_ops {
diff --git a/include/net/tcp.h b/include/net/tcp.h
index d2f0736b76b8..144ba48bb07b 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1514,17 +1514,22 @@ static inline int tcp_full_space(const struct sock *sk)
return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}
-static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
+static inline void __tcp_adjust_rcv_ssthresh(struct sock *sk, u32 new_ssthresh)
{
int unused_mem = sk_unused_reserved_mem(sk);
struct tcp_sock *tp = tcp_sk(sk);
- tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
+ tp->rcv_ssthresh = min(tp->rcv_ssthresh, new_ssthresh);
if (unused_mem)
tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
tcp_win_from_space(sk, unused_mem));
}
+static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
+{
+ __tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss);
+}
+
void tcp_cleanup_rbuf(struct sock *sk, int copied);
void __tcp_cleanup_rbuf(struct sock *sk, int copied);
diff --git a/include/net/tcp_ao.h b/include/net/tcp_ao.h
index b56be10838f0..647781080613 100644
--- a/include/net/tcp_ao.h
+++ b/include/net/tcp_ao.h
@@ -62,11 +62,17 @@ static inline int tcp_ao_maclen(const struct tcp_ao_key *key)
return key->maclen;
}
+/* Use tcp_ao_len_aligned() for TCP header calculations */
static inline int tcp_ao_len(const struct tcp_ao_key *key)
{
return tcp_ao_maclen(key) + sizeof(struct tcp_ao_hdr);
}
+static inline int tcp_ao_len_aligned(const struct tcp_ao_key *key)
+{
+ return round_up(tcp_ao_len(key), 4);
+}
+
static inline unsigned int tcp_ao_digest_size(struct tcp_ao_key *key)
{
return key->digest_size;
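As a worked example of the alignment helper: a 12-byte MAC plus the 4-byte AO header gives tcp_ao_len() == 16, already 4-byte aligned, while a 13-byte MAC gives 17, which tcp_ao_len_aligned() rounds up to 20 so the TCP option space stays 32-bit aligned.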
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 95896472a82b..565a85044541 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -77,6 +77,13 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
{
__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
umem->sgt_append.sgt.nents, pgsz);
+ biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1);
+ biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz);
+}
+
+static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter)
+{
+ return __rdma_block_iter_next(biter) && biter->__sg_numblocks--;
}
/**
@@ -92,7 +99,7 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
*/
#define rdma_umem_for_each_dma_block(umem, biter, pgsz) \
for (__rdma_umem_block_iter_start(biter, umem, pgsz); \
- __rdma_block_iter_next(biter);)
+ __rdma_umem_block_iter_next(biter);)
#ifdef CONFIG_INFINIBAND_USER_MEM
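A minimal sketch of the bounded iterator, which now stops after ib_umem_num_dma_blocks() blocks instead of running off the end of the scatterlist; struct example_mr and program_mtt() are hypothetical.

static void example_map_umem(struct example_mr *mr, struct ib_umem *umem)
{
	struct ib_block_iter biter;

	rdma_umem_for_each_dma_block(umem, &biter, SZ_4K)
		program_mtt(mr, rdma_block_iter_dma_address(&biter));
}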
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index fb1a2d6b1969..b7b6b58dd348 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2850,6 +2850,7 @@ struct ib_block_iter {
/* internal states */
struct scatterlist *__sg; /* sg holding the current aligned block */
dma_addr_t __dma_addr; /* unaligned DMA address of this block */
+ size_t __sg_numblocks; /* ib_umem_num_dma_blocks() */
unsigned int __sg_nents; /* number of SG entries */
unsigned int __sg_advance; /* number of bytes to advance in sg in next step */
unsigned int __pg_bit; /* alignment of current block */
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 10480eb582b2..5ec1e71a09de 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -167,19 +167,25 @@ struct scsi_device {
* power state for system suspend/resume (suspend to RAM and
* hibernation) operations.
*/
- bool manage_system_start_stop;
+ unsigned manage_system_start_stop:1;
/*
* If true, let the high-level device driver (sd) manage the device
* power state for runtime device suspend and resume operations.
*/
- bool manage_runtime_start_stop;
+ unsigned manage_runtime_start_stop:1;
/*
* If true, let the high-level device driver (sd) manage the device
* power state for system shutdown (power off) operations.
*/
- bool manage_shutdown;
+ unsigned manage_shutdown:1;
+
+ /*
+ * If set and if the device is runtime suspended, ask the high-level
+ * device driver (sd) to force a runtime resume of the device.
+ */
+ unsigned force_runtime_start_on_system_start:1;
unsigned removable:1;
unsigned changed:1; /* Data invalid due to media change */
diff --git a/include/sound/cs35l41.h b/include/sound/cs35l41.h
index 043f8ac65dbf..68e053fe7340 100644
--- a/include/sound/cs35l41.h
+++ b/include/sound/cs35l41.h
@@ -906,6 +906,6 @@ int cs35l41_init_boost(struct device *dev, struct regmap *regmap,
bool cs35l41_safe_reset(struct regmap *regmap, enum cs35l41_boost_type b_type);
int cs35l41_mdsync_up(struct regmap *regmap);
int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l41_boost_type b_type,
- int enable, bool firmware_running);
+ int enable, struct cs_dsp *dsp);
#endif /* __CS35L41_H */
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 8662b5aeea0c..16122819edfe 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -713,7 +713,8 @@ struct drm_gem_open {
/**
* DRM_CAP_ASYNC_PAGE_FLIP
*
- * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC.
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for legacy
+ * page-flips.
*/
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
/**
@@ -773,6 +774,13 @@ struct drm_gem_open {
* :ref:`drm_sync_objects`.
*/
#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
+/**
+ * DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP
+ *
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for atomic
+ * commits.
+ */
+#define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP 0x15
/* DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
@@ -842,6 +850,31 @@ struct drm_get_cap {
*/
#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
+/**
+ * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT
+ *
+ * Drivers for para-virtualized hardware (e.g. vmwgfx, qxl, virtio and
+ * virtualbox) have additional restrictions for cursor planes (thus
+ * making cursor planes on those drivers not truly universal), e.g.
+ * they need cursor planes to act like one would expect from a mouse
+ * cursor and have correctly set hotspot properties.
+ * If this client cap is not set, the DRM core will hide the cursor plane
+ * on those virtualized drivers because not setting it implies that the
+ * client is not capable of dealing with those extra restrictions.
+ * Clients which do set cursor hotspot and treat the cursor plane
+ * like a mouse cursor should set this property.
+ * The client must enable &DRM_CLIENT_CAP_ATOMIC first.
+ *
+ * Setting this property on drivers which do not special case
+ * cursor planes (i.e. non-virtualized drivers) will return
+ * EOPNOTSUPP, which can be used by userspace to gauge
+ * requirements of the hardware/drivers they're running on.
+ *
+ * This capability is always supported for atomic-capable virtualized
+ * drivers starting from kernel version 6.6.
+ */
+#define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT 6
+
/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
@@ -893,6 +926,7 @@ struct drm_syncobj_transfer {
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3) /* set fence deadline to deadline_nsec */
struct drm_syncobj_wait {
__u64 handles;
/* absolute timeout */
@@ -901,6 +935,14 @@ struct drm_syncobj_wait {
__u32 flags;
__u32 first_signaled; /* only valid when not waiting all */
__u32 pad;
+ /**
+	 * @deadline_nsec: fence deadline hint
+ *
+ * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
+ * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
+ * set.
+ */
+ __u64 deadline_nsec;
};
struct drm_syncobj_timeline_wait {
@@ -913,6 +955,14 @@ struct drm_syncobj_timeline_wait {
__u32 flags;
__u32 first_signaled; /* only valid when not waiting all */
__u32 pad;
+ /**
+	 * @deadline_nsec: fence deadline hint
+ *
+ * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
+ * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
+ * set.
+ */
+ __u64 deadline_nsec;
};
/**
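A minimal userspace sketch of opting in to the new client cap through libdrm's drmSetClientCap(); the fallback handling is illustrative.

#include <xf86drm.h>

static int enable_cursor_hotspot(int fd)
{
	int ret;

	/* The cap requires atomic, so enable that first. */
	ret = drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1);
	if (ret)
		return ret;

	/* Non-virtualized drivers return -EOPNOTSUPP; treat as optional. */
	return drmSetClientCap(fd, DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT, 1);
}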
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 09e7a471ee30..95630f170110 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -957,6 +957,15 @@ struct hdr_output_metadata {
* Request that the page-flip is performed as soon as possible, ie. with no
* delay due to waiting for vblank. This may cause tearing to be visible on
* the screen.
+ *
+ * When used with atomic uAPI, the driver will return an error if the hardware
+ * doesn't support performing an asynchronous page-flip for this update.
+ * User-space should handle this, e.g. by falling back to a regular page-flip.
+ *
+ * Note, some hardware might need to perform one last synchronous page-flip
+ * before being able to switch to asynchronous page-flips. As an exception,
+ * the driver will return success even though that first page-flip is not
+ * asynchronous.
*/
#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
#define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4
diff --git a/include/uapi/drm/pvr_drm.h b/include/uapi/drm/pvr_drm.h
index 1834375390c4..ccf6c2112468 100644
--- a/include/uapi/drm/pvr_drm.h
+++ b/include/uapi/drm/pvr_drm.h
@@ -741,20 +741,18 @@ enum drm_pvr_ctx_priority {
*/
enum drm_pvr_ctx_type {
/**
- * @DRM_PVR_CTX_TYPE_RENDER: Render context. Use &struct
- * drm_pvr_ioctl_create_render_context_args for context creation arguments.
+ * @DRM_PVR_CTX_TYPE_RENDER: Render context.
*/
DRM_PVR_CTX_TYPE_RENDER = 0,
/**
- * @DRM_PVR_CTX_TYPE_COMPUTE: Compute context. Use &struct
- * drm_pvr_ioctl_create_compute_context_args for context creation arguments.
+ * @DRM_PVR_CTX_TYPE_COMPUTE: Compute context.
*/
DRM_PVR_CTX_TYPE_COMPUTE,
/**
- * @DRM_PVR_CTX_TYPE_TRANSFER_FRAG: Transfer context for fragment data masters. Use
- * &struct drm_pvr_ioctl_create_transfer_context_args for context creation arguments.
+ * @DRM_PVR_CTX_TYPE_TRANSFER_FRAG: Transfer context for fragment data
+ * master.
*/
DRM_PVR_CTX_TYPE_TRANSFER_FRAG,
};
diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h
index 1a7d7a689de3..dce1835eced4 100644
--- a/include/uapi/drm/v3d_drm.h
+++ b/include/uapi/drm/v3d_drm.h
@@ -41,6 +41,7 @@ extern "C" {
#define DRM_V3D_PERFMON_CREATE 0x08
#define DRM_V3D_PERFMON_DESTROY 0x09
#define DRM_V3D_PERFMON_GET_VALUES 0x0a
+#define DRM_V3D_SUBMIT_CPU 0x0b
#define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl)
#define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo)
@@ -56,6 +57,7 @@ extern "C" {
struct drm_v3d_perfmon_destroy)
#define DRM_IOCTL_V3D_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_GET_VALUES, \
struct drm_v3d_perfmon_get_values)
+#define DRM_IOCTL_V3D_SUBMIT_CPU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CPU, struct drm_v3d_submit_cpu)
#define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 0x01
#define DRM_V3D_SUBMIT_EXTENSION 0x02
@@ -69,7 +71,13 @@ extern "C" {
struct drm_v3d_extension {
__u64 next;
__u32 id;
-#define DRM_V3D_EXT_ID_MULTI_SYNC 0x01
+#define DRM_V3D_EXT_ID_MULTI_SYNC 0x01
+#define DRM_V3D_EXT_ID_CPU_INDIRECT_CSD 0x02
+#define DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY 0x03
+#define DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY 0x04
+#define DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY 0x05
+#define DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY 0x06
+#define DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY 0x07
__u32 flags; /* mbz */
};
@@ -93,6 +101,7 @@ enum v3d_queue {
V3D_TFU,
V3D_CSD,
V3D_CACHE_CLEAN,
+ V3D_CPU,
};
/**
@@ -276,6 +285,7 @@ enum drm_v3d_param {
DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH,
DRM_V3D_PARAM_SUPPORTS_PERFMON,
DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT,
+ DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE,
};
struct drm_v3d_get_param {
@@ -361,6 +371,234 @@ struct drm_v3d_submit_csd {
__u32 pad;
};
+/**
+ * struct drm_v3d_indirect_csd - ioctl extension for the CPU job to create an
+ * indirect CSD
+ *
+ * When an extension with the DRM_V3D_EXT_ID_CPU_INDIRECT_CSD id is defined,
+ * it points to this extension to define an indirect CSD submission. It
+ * creates a CPU job linked to a CSD job. The CPU job waits for the indirect
+ * CSD dependencies and, once they are signaled, updates the CSD job config
+ * before allowing the CSD job to execute.
+ */
+struct drm_v3d_indirect_csd {
+ struct drm_v3d_extension base;
+
+ /* Indirect CSD */
+ struct drm_v3d_submit_csd submit;
+
+ /* Handle of the indirect BO, which should also be attached to the
+ * indirect CSD.
+ */
+ __u32 indirect;
+
+ /* Offset within the BO where the workgroup counts are stored */
+ __u32 offset;
+
+ /* Workgroup size */
+ __u32 wg_size;
+
+ /* Indices of the uniforms with the workgroup dispatch counts
+ * in the uniform stream. If the uniform rewrite is not needed,
+ * the offset must be 0xffffffff.
+ */
+ __u32 wg_uniform_offsets[3];
+};
+
+/**
+ * struct drm_v3d_timestamp_query - ioctl extension for the CPU job to calculate
+ * a timestamp query
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY is defined, it points to
+ * this extension to define a timestamp query submission. This CPU job will
+ * calculate the timestamp query and update the query value within the
+ * timestamp BO. Moreover, it will signal the timestamp syncobj to indicate
+ * query availability.
+ */
+struct drm_v3d_timestamp_query {
+ struct drm_v3d_extension base;
+
+ /* Array of queries' offsets within the timestamp BO for their value */
+ __u64 offsets;
+
+ /* Array of timestamp syncobjs to indicate their availability */
+ __u64 syncs;
+
+ /* Number of queries */
+ __u32 count;
+
+ /* mbz */
+ __u32 pad;
+};
+
+/**
+ * struct drm_v3d_reset_timestamp_query - ioctl extension for the CPU job to
+ * reset timestamp queries
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY is defined, it
+ * points to this extension to define a reset timestamp submission. This CPU
+ * job will reset the timestamp queries based on the value offset of the first
+ * query. Moreover, it will reset the timestamp syncobj to reset query
+ * availability.
+ */
+struct drm_v3d_reset_timestamp_query {
+ struct drm_v3d_extension base;
+
+ /* Array of timestamp syncobjs to indicate their availability */
+ __u64 syncs;
+
+ /* Offset of the first query within the timestamp BO for its value */
+ __u32 offset;
+
+ /* Number of queries */
+ __u32 count;
+};
+
+/**
+ * struct drm_v3d_copy_timestamp_query - ioctl extension for the CPU job to copy
+ * query results to a buffer
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY is defined, it
+ * points to this extension to define a copy timestamp query submission. This
+ * CPU job will copy the timestamp query results to a BO with the offset
+ * and stride defined in the extension.
+ */
+struct drm_v3d_copy_timestamp_query {
+ struct drm_v3d_extension base;
+
+ /* Defines whether the job should write to the buffer using 64 or 32 bits */
+ __u8 do_64bit;
+
+ /* Defines whether the job may write to the buffer even if the query is not available */
+ __u8 do_partial;
+
+ /* Defines whether the job should write the availability bit to the buffer */
+ __u8 availability_bit;
+
+ /* mbz */
+ __u8 pad;
+
+ /* Offset of the buffer in the BO */
+ __u32 offset;
+
+ /* Stride of the buffer in the BO */
+ __u32 stride;
+
+ /* Number of queries */
+ __u32 count;
+
+ /* Array of queries' offsets within the timestamp BO for their value */
+ __u64 offsets;
+
+ /* Array of timestamp syncobjs to indicate their availability */
+ __u64 syncs;
+};
+
+/**
+ * struct drm_v3d_reset_performance_query - ioctl extension for the CPU job to
+ * reset performance queries
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY is defined, it
+ * points to this extension to define a reset performance submission. This CPU
+ * job will reset the performance queries by resetting the values of the
+ * performance monitors. Moreover, it will reset the syncobj to reset query
+ * availability.
+ */
+struct drm_v3d_reset_performance_query {
+ struct drm_v3d_extension base;
+
+ /* Array of performance query syncobjs to indicate their availability */
+ __u64 syncs;
+
+ /* Number of queries */
+ __u32 count;
+
+ /* Number of performance monitors */
+ __u32 nperfmons;
+
+ /* Array of u64 user-pointers that point to an array of kperfmon_ids */
+ __u64 kperfmon_ids;
+};
+
+/**
+ * struct drm_v3d_copy_performance_query - ioctl extension for the CPU job to copy
+ * performance query results to a buffer
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY is defined, it
+ * points to this extension to define a copy performance query submission. This
+ * CPU job will copy the performance query results to a BO with the offset
+ * and stride defined in the extension.
+ */
+struct drm_v3d_copy_performance_query {
+ struct drm_v3d_extension base;
+
+ /* Defines whether the job should write to the buffer using 64 or 32 bits */
+ __u8 do_64bit;
+
+ /* Defines whether the job may write to the buffer even if the query is not available */
+ __u8 do_partial;
+
+ /* Defines whether the job should write the availability bit to the buffer */
+ __u8 availability_bit;
+
+ /* mbz */
+ __u8 pad;
+
+ /* Offset of the buffer in the BO */
+ __u32 offset;
+
+ /* Stride of the buffer in the BO */
+ __u32 stride;
+
+ /* Number of performance monitors */
+ __u32 nperfmons;
+
+ /* Number of performance counters related to this query pool */
+ __u32 ncounters;
+
+ /* Number of queries */
+ __u32 count;
+
+ /* Array of performance query syncobjs to indicate their availability */
+ __u64 syncs;
+
+ /* Array of u64 user-pointers that point to an array of kperfmon_ids */
+ __u64 kperfmon_ids;
+};
+
+struct drm_v3d_submit_cpu {
+ /* Pointer to a u32 array of the BOs that are referenced by the job.
+ *
+ * For DRM_V3D_EXT_ID_CPU_INDIRECT_CSD, it must contain only one BO,
+ * which contains the workgroup counts.
+ *
+ * For DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY, it must contain only one
+ * BO, which will contain the timestamp.
+ *
+ * For DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY, it must contain only
+ * one BO, which contains the timestamp.
+ *
+ * For DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY, it must contain two
+ * BOs. The first is the BO where the timestamp query results will be
+ * written. The second is the BO that contains the timestamp.
+ *
+ * For DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY, it must contain no
+ * BOs.
+ *
+ * For DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY, it must contain one
+ * BO, where the performance query results will be written.
+ */
+ __u64 bo_handles;
+
+ /* Number of BO handles passed in (size is that times 4). */
+ __u32 bo_handle_count;
+
+ __u32 flags;
+
+ /* Pointer to an array of ioctl extensions */
+ __u64 extensions;
+};
+
enum {
V3D_PERFCNT_FEP_VALID_PRIMTS_NO_PIXELS,
V3D_PERFCNT_FEP_VALID_PRIMS,
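Putting the new uAPI together, a hedged sketch of a CPU job that records a single timestamp query; the fd, BO handle, and syncobj handle are assumed, and error handling is elided:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/v3d_drm.h>

/* Sketch only: one-entry offsets/syncs arrays and a single BO, as the
 * bo_handles rules above require for a timestamp query CPU job.
 */
static int record_one_timestamp(int v3d_fd, uint32_t timestamp_bo,
				uint32_t syncobj)
{
	uint64_t offsets[1] = { 0 };       /* query value at BO offset 0 */
	uint32_t syncs[1] = { syncobj };   /* signaled when available */
	uint32_t bos[1] = { timestamp_bo };
	struct drm_v3d_timestamp_query ext = {
		.base.id = DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY,
		.offsets = (uintptr_t)offsets,
		.syncs = (uintptr_t)syncs,
		.count = 1,
	};
	struct drm_v3d_submit_cpu submit = {
		.bo_handles = (uintptr_t)bos,
		.bo_handle_count = 1,
		.extensions = (uintptr_t)&ext,
	};

	return ioctl(v3d_fd, DRM_IOCTL_V3D_SUBMIT_CPU, &submit);
}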
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
index 5c6c4269f7ef..2ec6f35cda32 100644
--- a/include/uapi/linux/stddef.h
+++ b/include/uapi/linux/stddef.h
@@ -27,7 +27,7 @@
union { \
struct { MEMBERS } ATTRS; \
struct TAG { MEMBERS } ATTRS NAME; \
- }
+ } ATTRS
#ifdef __cplusplus
/* sizeof(struct{}) is 1 in C++, not 0, can't use C version of the macro. */
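A hedged userspace-style illustration of the invariant the extra ATTRS preserves: the anonymous view, the tagged view, and the wrapping union must all share one layout even when ATTRS (here packed) is in play. The struct and its size are illustrative only:

#include <linux/stddef.h>
#include <linux/types.h>

struct pkt {
	__u8 kind;
	__struct_group(pkt_hdr, hdr, __attribute__((packed)),
		__u16 len;
		__u32 seq;
	);
};
/* kind (1) + packed group (6), and the union wrapper stays packed too */
_Static_assert(sizeof(struct pkt) == 7, "group layout stays packed");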
diff --git a/include/uapi/linux/sync_file.h b/include/uapi/linux/sync_file.h
index ff0a931833e2..ff1f38889dcf 100644
--- a/include/uapi/linux/sync_file.h
+++ b/include/uapi/linux/sync_file.h
@@ -76,6 +76,27 @@ struct sync_file_info {
__u64 sync_fence_info;
};
+/**
+ * struct sync_set_deadline - SYNC_IOC_SET_DEADLINE - set a deadline hint on a fence
+ * @deadline_ns: absolute time of the deadline
+ * @pad: must be zero
+ *
+ * Allows userspace to set a deadline on a fence, see &dma_fence_set_deadline
+ *
+ * The timebase for the deadline is CLOCK_MONOTONIC (same as vblank). For
+ * example
+ *
+ * clock_gettime(CLOCK_MONOTONIC, &t);
+ * deadline_ns = (t.tv_sec * 1000000000L) + t.tv_nsec + ns_until_deadline
+ */
+struct sync_set_deadline {
+ __u64 deadline_ns;
+ /* Not strictly needed for alignment but gives some possibility
+ * for future extension.
+ */
+ __u64 pad;
+};
+
#define SYNC_IOC_MAGIC '>'
/*
@@ -87,5 +108,6 @@ struct sync_file_info {
#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data)
#define SYNC_IOC_FILE_INFO _IOWR(SYNC_IOC_MAGIC, 4, struct sync_file_info)
+#define SYNC_IOC_SET_DEADLINE _IOW(SYNC_IOC_MAGIC, 5, struct sync_set_deadline)
#endif /* _UAPI_LINUX_SYNC_H */
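A minimal sketch of the new ioctl from userspace, assuming `fence_fd` refers to a sync_file; error handling is elided:

#include <stdint.h>
#include <time.h>
#include <sys/ioctl.h>
#include <linux/sync_file.h>

/* Sketch only: hint that the fence should signal within roughly one
 * 60Hz frame from now.
 */
static int hint_deadline(int fence_fd)
{
	struct timespec t;
	struct sync_set_deadline args = { 0 };

	clock_gettime(CLOCK_MONOTONIC, &t);
	args.deadline_ns = (uint64_t)t.tv_sec * 1000000000ULL +
			   t.tv_nsec + 16700000ULL;
	return ioctl(fence_fd, SYNC_IOC_SET_DEADLINE, &args);
}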
diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h
index 4a195b68f28f..b383c2fe0cf3 100644
--- a/include/uapi/linux/v4l2-subdev.h
+++ b/include/uapi/linux/v4l2-subdev.h
@@ -239,7 +239,7 @@ struct v4l2_subdev_routing {
* set (which is the default), the 'stream' fields will be forced to 0 by the
* kernel.
*/
- #define V4L2_SUBDEV_CLIENT_CAP_STREAMS (1U << 0)
+ #define V4L2_SUBDEV_CLIENT_CAP_STREAMS (1ULL << 0)
/**
* struct v4l2_subdev_client_capability - Capabilities of the client accessing
diff --git a/io_uring/cancel.c b/io_uring/cancel.c
index 3c19cccb1aec..8a8b07dfc444 100644
--- a/io_uring/cancel.c
+++ b/io_uring/cancel.c
@@ -273,7 +273,7 @@ int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
};
ktime_t timeout = KTIME_MAX;
struct io_uring_sync_cancel_reg sc;
- struct fd f = { };
+ struct file *file = NULL;
DEFINE_WAIT(wait);
int ret, i;
@@ -295,10 +295,10 @@ int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
/* we can grab a normal file descriptor upfront */
if ((cd.flags & IORING_ASYNC_CANCEL_FD) &&
!(cd.flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
- f = fdget(sc.fd);
- if (!f.file)
+ file = fget(sc.fd);
+ if (!file)
return -EBADF;
- cd.file = f.file;
+ cd.file = file;
}
ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);
@@ -348,6 +348,7 @@ int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
if (ret == -ENOENT || ret > 0)
ret = 0;
out:
- fdput(f);
+ if (file)
+ fput(file);
return ret;
}
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index ed254076c723..9626a363f121 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -271,6 +271,7 @@ static __cold void io_fallback_req_func(struct work_struct *work)
struct io_kiocb *req, *tmp;
struct io_tw_state ts = { .locked = true, };
+ percpu_ref_get(&ctx->refs);
mutex_lock(&ctx->uring_lock);
llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
req->io_task_work.func(req, &ts);
@@ -278,6 +279,7 @@ static __cold void io_fallback_req_func(struct work_struct *work)
return;
io_submit_flush_completions(ctx);
mutex_unlock(&ctx->uring_lock);
+ percpu_ref_put(&ctx->refs);
}
static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
@@ -325,6 +327,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
INIT_LIST_HEAD(&ctx->sqd_list);
INIT_LIST_HEAD(&ctx->cq_overflow_list);
INIT_LIST_HEAD(&ctx->io_buffers_cache);
+ INIT_HLIST_HEAD(&ctx->io_buf_list);
io_alloc_cache_init(&ctx->rsrc_node_cache, IO_NODE_ALLOC_CACHE_MAX,
sizeof(struct io_rsrc_node));
io_alloc_cache_init(&ctx->apoll_cache, IO_ALLOC_CACHE_MAX,
@@ -2666,7 +2669,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
}
-static void io_mem_free(void *ptr)
+void io_mem_free(void *ptr)
{
if (!ptr)
return;
@@ -2697,6 +2700,7 @@ static void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
{
struct page **page_array;
unsigned int nr_pages;
+ void *page_addr;
int ret, i;
*npages = 0;
@@ -2718,27 +2722,29 @@ err:
io_pages_free(&page_array, ret > 0 ? ret : 0);
return ret < 0 ? ERR_PTR(ret) : ERR_PTR(-EFAULT);
}
- /*
- * Should be a single page. If the ring is small enough that we can
- * use a normal page, that is fine. If we need multiple pages, then
- * userspace should use a huge page. That's the only way to guarantee
- * that we get contigious memory, outside of just being lucky or
- * (currently) having low memory fragmentation.
- */
- if (page_array[0] != page_array[ret - 1])
- goto err;
- /*
- * Can't support mapping user allocated ring memory on 32-bit archs
- * where it could potentially reside in highmem. Just fail those with
- * -EINVAL, just like we did on kernels that didn't support this
- * feature.
- */
+ page_addr = page_address(page_array[0]);
for (i = 0; i < nr_pages; i++) {
- if (PageHighMem(page_array[i])) {
- ret = -EINVAL;
+ ret = -EINVAL;
+
+ /*
+ * Can't support mapping user allocated ring memory on 32-bit
+ * archs where it could potentially reside in highmem. Just
+ * fail those with -EINVAL, just like we did on kernels that
+ * didn't support this feature.
+ */
+ if (PageHighMem(page_array[i]))
goto err;
- }
+
+ /*
+ * No support for discontig pages for now, should either be a
+ * single normal page, or a huge page. Later on we can add
+ * support for remapping discontig pages, for now we will
+ * just fail them with EINVAL.
+ */
+ if (page_address(page_array[i]) != page_addr)
+ goto err;
+ page_addr += PAGE_SIZE;
}
*pages = page_array;
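From userspace, the contiguity rule above is most easily satisfied with a huge page; a hedged sketch using liburing's io_uring_queue_init_mem() helper (assumed available in liburing 2.5+, which sets IORING_SETUP_NO_MMAP and carves the rings out of the supplied region):

#include <errno.h>
#include <liburing.h>
#include <sys/mman.h>

/* Sketch only: back the rings with a single 2MB huge page so the
 * kernel sees physically contiguous memory. Assumes hugetlb pages
 * are available; fallback handling elided.
 */
static int ring_from_hugepage(struct io_uring *ring)
{
	struct io_uring_params p = { 0 };
	size_t len = 2 * 1024 * 1024;
	void *mem = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (mem == MAP_FAILED)
		return -ENOMEM;
	return io_uring_queue_init_mem(64, ring, &p, mem, len);
}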
@@ -2775,7 +2781,7 @@ static void io_rings_free(struct io_ring_ctx *ctx)
}
}
-static void *io_mem_alloc(size_t size)
+void *io_mem_alloc(size_t size)
{
gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
void *ret;
@@ -2947,6 +2953,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
ctx->mm_account = NULL;
}
io_rings_free(ctx);
+ io_kbuf_mmap_list_free(ctx);
percpu_ref_exit(&ctx->refs);
free_uid(ctx->user);
@@ -3141,12 +3148,7 @@ static __cold void io_ring_exit_work(struct work_struct *work)
init_completion(&exit.completion);
init_task_work(&exit.task_work, io_tctx_exit_cb);
exit.ctx = ctx;
- /*
- * Some may use context even when all refs and requests have been put,
- * and they are free to do so while still holding uring_lock or
- * completion_lock, see io_req_task_submit(). Apart from other work,
- * this lock/unlock section also waits them to finish.
- */
+
mutex_lock(&ctx->uring_lock);
while (!list_empty(&ctx->tctx_list)) {
WARN_ON_ONCE(time_after(jiffies, timeout));
@@ -3475,25 +3477,27 @@ static void *io_uring_validate_mmap_request(struct file *file,
struct page *page;
void *ptr;
- /* Don't allow mmap if the ring was setup without it */
- if (ctx->flags & IORING_SETUP_NO_MMAP)
- return ERR_PTR(-EINVAL);
-
switch (offset & IORING_OFF_MMAP_MASK) {
case IORING_OFF_SQ_RING:
case IORING_OFF_CQ_RING:
+ /* Don't allow mmap if the ring was setup without it */
+ if (ctx->flags & IORING_SETUP_NO_MMAP)
+ return ERR_PTR(-EINVAL);
ptr = ctx->rings;
break;
case IORING_OFF_SQES:
+ /* Don't allow mmap if the ring was setup without it */
+ if (ctx->flags & IORING_SETUP_NO_MMAP)
+ return ERR_PTR(-EINVAL);
ptr = ctx->sq_sqes;
break;
case IORING_OFF_PBUF_RING: {
unsigned int bgid;
bgid = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT;
- mutex_lock(&ctx->uring_lock);
+ rcu_read_lock();
ptr = io_pbuf_get_address(ctx, bgid);
- mutex_unlock(&ctx->uring_lock);
+ rcu_read_unlock();
if (!ptr)
return ERR_PTR(-EINVAL);
break;
@@ -3645,7 +3649,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
size_t, argsz)
{
struct io_ring_ctx *ctx;
- struct fd f;
+ struct file *file;
long ret;
if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
@@ -3663,20 +3667,19 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
return -EINVAL;
fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
- f.file = tctx->registered_rings[fd];
- f.flags = 0;
- if (unlikely(!f.file))
+ file = tctx->registered_rings[fd];
+ if (unlikely(!file))
return -EBADF;
} else {
- f = fdget(fd);
- if (unlikely(!f.file))
+ file = fget(fd);
+ if (unlikely(!file))
return -EBADF;
ret = -EOPNOTSUPP;
- if (unlikely(!io_is_uring_fops(f.file)))
+ if (unlikely(!io_is_uring_fops(file)))
goto out;
}
- ctx = f.file->private_data;
+ ctx = file->private_data;
ret = -EBADFD;
if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
goto out;
@@ -3770,7 +3773,8 @@ iopoll_locked:
}
}
out:
- fdput(f);
+ if (!(flags & IORING_ENTER_REGISTERED_RING))
+ fput(file);
return ret;
}
@@ -4611,7 +4615,7 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
{
struct io_ring_ctx *ctx;
long ret = -EBADF;
- struct fd f;
+ struct file *file;
bool use_registered_ring;
use_registered_ring = !!(opcode & IORING_REGISTER_USE_REGISTERED_RING);
@@ -4630,27 +4634,27 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
return -EINVAL;
fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
- f.file = tctx->registered_rings[fd];
- f.flags = 0;
- if (unlikely(!f.file))
+ file = tctx->registered_rings[fd];
+ if (unlikely(!file))
return -EBADF;
} else {
- f = fdget(fd);
- if (unlikely(!f.file))
+ file = fget(fd);
+ if (unlikely(!file))
return -EBADF;
ret = -EOPNOTSUPP;
- if (!io_is_uring_fops(f.file))
+ if (!io_is_uring_fops(file))
goto out_fput;
}
- ctx = f.file->private_data;
+ ctx = file->private_data;
mutex_lock(&ctx->uring_lock);
ret = __io_uring_register(ctx, opcode, arg, nr_args);
mutex_unlock(&ctx->uring_lock);
trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, ret);
out_fput:
- fdput(f);
+ if (!use_registered_ring)
+ fput(file);
return ret;
}
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index dc6d779b452b..ed84f2737b3a 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -86,6 +86,9 @@ bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
bool cancel_all);
+void *io_mem_alloc(size_t size);
+void io_mem_free(void *ptr);
+
#if defined(CONFIG_PROVE_LOCKING)
static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index a1e4239c7d75..72b6af1d2ed3 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -33,19 +33,42 @@ struct io_provide_buf {
__u16 bid;
};
+struct io_buf_free {
+ struct hlist_node list;
+ void *mem;
+ size_t size;
+ int inuse;
+};
+
+static struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
+ struct io_buffer_list *bl,
+ unsigned int bgid)
+{
+ if (bl && bgid < BGID_ARRAY)
+ return &bl[bgid];
+
+ return xa_load(&ctx->io_bl_xa, bgid);
+}
+
static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
unsigned int bgid)
{
- if (ctx->io_bl && bgid < BGID_ARRAY)
- return &ctx->io_bl[bgid];
+ lockdep_assert_held(&ctx->uring_lock);
- return xa_load(&ctx->io_bl_xa, bgid);
+ return __io_buffer_get_list(ctx, ctx->io_bl, bgid);
}
static int io_buffer_add_list(struct io_ring_ctx *ctx,
struct io_buffer_list *bl, unsigned int bgid)
{
+ /*
+ * Store buffer group ID and finally mark the list as visible.
+ * The normal lookup doesn't care about the visibility as we're
+ * always under the ->uring_lock, but the RCU lookup from mmap does.
+ */
bl->bgid = bgid;
+ smp_store_release(&bl->is_ready, 1);
+
if (bgid < BGID_ARRAY)
return 0;
@@ -196,21 +219,40 @@ void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
{
+ struct io_buffer_list *bl;
int i;
- ctx->io_bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list),
- GFP_KERNEL);
- if (!ctx->io_bl)
+ bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list), GFP_KERNEL);
+ if (!bl)
return -ENOMEM;
for (i = 0; i < BGID_ARRAY; i++) {
- INIT_LIST_HEAD(&ctx->io_bl[i].buf_list);
- ctx->io_bl[i].bgid = i;
+ INIT_LIST_HEAD(&bl[i].buf_list);
+ bl[i].bgid = i;
}
+ smp_store_release(&ctx->io_bl, bl);
return 0;
}
+/*
+ * Mark the given mapped range as free for reuse
+ */
+static void io_kbuf_mark_free(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
+{
+ struct io_buf_free *ibf;
+
+ hlist_for_each_entry(ibf, &ctx->io_buf_list, list) {
+ if (bl->buf_ring == ibf->mem) {
+ ibf->inuse = 0;
+ return;
+ }
+ }
+
+ /* can't happen... */
+ WARN_ON_ONCE(1);
+}
+
static int __io_remove_buffers(struct io_ring_ctx *ctx,
struct io_buffer_list *bl, unsigned nbufs)
{
@@ -223,7 +265,11 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx,
if (bl->is_mapped) {
i = bl->buf_ring->tail - bl->head;
if (bl->is_mmap) {
- folio_put(virt_to_folio(bl->buf_ring));
+ /*
+ * io_kbuf_list_free() will free the page(s) at
+ * ->release() time.
+ */
+ io_kbuf_mark_free(ctx, bl);
bl->buf_ring = NULL;
bl->is_mmap = 0;
} else if (bl->buf_nr_pages) {
@@ -274,9 +320,17 @@ void io_destroy_buffers(struct io_ring_ctx *ctx)
xa_for_each(&ctx->io_bl_xa, index, bl) {
xa_erase(&ctx->io_bl_xa, bl->bgid);
__io_remove_buffers(ctx, bl, -1U);
- kfree(bl);
+ kfree_rcu(bl, rcu);
}
+ /*
+ * Move deferred locked entries to cache before pruning
+ */
+ spin_lock(&ctx->completion_lock);
+ if (!list_empty(&ctx->io_buffers_comp))
+ list_splice_init(&ctx->io_buffers_comp, &ctx->io_buffers_cache);
+ spin_unlock(&ctx->completion_lock);
+
list_for_each_safe(item, tmp, &ctx->io_buffers_cache) {
buf = list_entry(item, struct io_buffer, list);
kmem_cache_free(io_buf_cachep, buf);
@@ -460,7 +514,16 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
INIT_LIST_HEAD(&bl->buf_list);
ret = io_buffer_add_list(ctx, bl, p->bgid);
if (ret) {
- kfree(bl);
+ /*
+ * Doesn't need rcu free as it was never visible, but
+ * let's keep it consistent throughout. Also can't
+ * be a lower indexed array group, as adding one
+ * where lookup failed cannot happen.
+ */
+ if (p->bgid >= BGID_ARRAY)
+ kfree_rcu(bl, rcu);
+ else
+ WARN_ON_ONCE(1);
goto err;
}
}
@@ -531,19 +594,63 @@ error_unpin:
return -EINVAL;
}
-static int io_alloc_pbuf_ring(struct io_uring_buf_reg *reg,
+/*
+ * See if we have a suitable region that we can reuse, rather than allocate
+ * both a new io_buf_free and mem region again. We leave it on the list as
+ * even a reused entry will need freeing at ring release.
+ */
+static struct io_buf_free *io_lookup_buf_free_entry(struct io_ring_ctx *ctx,
+ size_t ring_size)
+{
+ struct io_buf_free *ibf, *best = NULL;
+ size_t best_dist;
+
+ hlist_for_each_entry(ibf, &ctx->io_buf_list, list) {
+ size_t dist;
+
+ if (ibf->inuse || ibf->size < ring_size)
+ continue;
+ dist = ibf->size - ring_size;
+ if (!best || dist < best_dist) {
+ best = ibf;
+ if (!dist)
+ break;
+ best_dist = dist;
+ }
+ }
+
+ return best;
+}
+
+static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
+ struct io_uring_buf_reg *reg,
struct io_buffer_list *bl)
{
- gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
+ struct io_buf_free *ibf;
size_t ring_size;
void *ptr;
ring_size = reg->ring_entries * sizeof(struct io_uring_buf_ring);
- ptr = (void *) __get_free_pages(gfp, get_order(ring_size));
- if (!ptr)
- return -ENOMEM;
- bl->buf_ring = ptr;
+ /* Reuse existing entry, if we can */
+ ibf = io_lookup_buf_free_entry(ctx, ring_size);
+ if (!ibf) {
+ ptr = io_mem_alloc(ring_size);
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+
+ /* Allocate and store deferred free entry */
+ ibf = kmalloc(sizeof(*ibf), GFP_KERNEL_ACCOUNT);
+ if (!ibf) {
+ io_mem_free(ptr);
+ return -ENOMEM;
+ }
+ ibf->mem = ptr;
+ ibf->size = ring_size;
+ hlist_add_head(&ibf->list, &ctx->io_buf_list);
+ }
+ ibf->inuse = 1;
+ bl->buf_ring = ibf->mem;
bl->is_mapped = 1;
bl->is_mmap = 1;
return 0;
@@ -555,6 +662,8 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
struct io_buffer_list *bl, *free_bl = NULL;
int ret;
+ lockdep_assert_held(&ctx->uring_lock);
+
if (copy_from_user(&reg, arg, sizeof(reg)))
return -EFAULT;
@@ -599,7 +708,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
if (!(reg.flags & IOU_PBUF_RING_MMAP))
ret = io_pin_pbuf_ring(&reg, bl);
else
- ret = io_alloc_pbuf_ring(&reg, bl);
+ ret = io_alloc_pbuf_ring(ctx, &reg, bl);
if (!ret) {
bl->nr_entries = reg.ring_entries;
@@ -609,7 +718,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
return 0;
}
- kfree(free_bl);
+ kfree_rcu(free_bl, rcu);
return ret;
}
@@ -618,6 +727,8 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
struct io_uring_buf_reg reg;
struct io_buffer_list *bl;
+ lockdep_assert_held(&ctx->uring_lock);
+
if (copy_from_user(&reg, arg, sizeof(reg)))
return -EFAULT;
if (reg.resv[0] || reg.resv[1] || reg.resv[2])
@@ -634,7 +745,7 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
__io_remove_buffers(ctx, bl, -1U);
if (bl->bgid >= BGID_ARRAY) {
xa_erase(&ctx->io_bl_xa, bl->bgid);
- kfree(bl);
+ kfree_rcu(bl, rcu);
}
return 0;
}
@@ -643,9 +754,33 @@ void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid)
{
struct io_buffer_list *bl;
- bl = io_buffer_get_list(ctx, bgid);
+ bl = __io_buffer_get_list(ctx, smp_load_acquire(&ctx->io_bl), bgid);
+
if (!bl || !bl->is_mmap)
return NULL;
+ /*
+ * Ensure the list is fully set up. Only strictly needed for RCU lookup
+ * via mmap, and in that case only for the array indexed groups. For
+ * the xarray lookups, it's either visible and ready, or not at all.
+ */
+ if (!smp_load_acquire(&bl->is_ready))
+ return NULL;
return bl->buf_ring;
}
+
+/*
+ * Called at or after ->release(), free the mmap'ed buffers that we used
+ * for memory mapped provided buffer rings.
+ */
+void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx)
+{
+ struct io_buf_free *ibf;
+ struct hlist_node *tmp;
+
+ hlist_for_each_entry_safe(ibf, tmp, &ctx->io_buf_list, list) {
+ hlist_del(&ibf->list);
+ io_mem_free(ibf->mem);
+ kfree(ibf);
+ }
+}
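The publish/consume pairing used here follows the usual release/acquire idiom; a hedged kernel-side sketch with hypothetical types, not the io_uring structures themselves:

struct published {
	int payload;
	u8 is_ready;
};

static void publish(struct published *p)
{
	p->payload = 42;                    /* initialize everything... */
	smp_store_release(&p->is_ready, 1); /* ...then mark it visible */
}

static int consume(struct published *p)
{
	if (!smp_load_acquire(&p->is_ready)) /* pairs with the release */
		return -EINVAL;
	return p->payload; /* guaranteed to see the initialized value */
}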
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index f2d615236b2c..9be5960817ea 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -15,6 +15,7 @@ struct io_buffer_list {
struct page **buf_pages;
struct io_uring_buf_ring *buf_ring;
};
+ struct rcu_head rcu;
};
__u16 bgid;
@@ -28,6 +29,8 @@ struct io_buffer_list {
__u8 is_mapped;
/* ring mapped provided buffers, but mmap'ed by application */
__u8 is_mmap;
+ /* bl is visible from an RCU point of view for lookup */
+ __u8 is_ready;
};
struct io_buffer {
@@ -51,6 +54,8 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
+void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx);
+
unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index 8625181fb87a..08ac0d8e07ef 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -77,17 +77,10 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file);
-#if defined(CONFIG_UNIX)
-static inline bool io_file_need_scm(struct file *filp)
-{
- return !!unix_get_socket(filp);
-}
-#else
static inline bool io_file_need_scm(struct file *filp)
{
return false;
}
-#endif
static inline int io_scm_file_account(struct io_ring_ctx *ctx,
struct file *file)
diff --git a/kernel/Kconfig.kexec b/kernel/Kconfig.kexec
index 7aff28ded2f4..1cc3b1c595d7 100644
--- a/kernel/Kconfig.kexec
+++ b/kernel/Kconfig.kexec
@@ -97,7 +97,6 @@ config CRASH_DUMP
depends on ARCH_SUPPORTS_KEXEC
select CRASH_CORE
select KEXEC_CORE
- select KEXEC
help
Generate crash dump after being started by kexec.
This should be normally only set in special crash dump kernels
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 2058e89b5ddd..c85ff9162a5c 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -1012,11 +1012,16 @@ static void prog_array_map_poke_untrack(struct bpf_map *map,
mutex_unlock(&aux->poke_mutex);
}
+void __weak bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
+ struct bpf_prog *new, struct bpf_prog *old)
+{
+ WARN_ON_ONCE(1);
+}
+
static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
struct bpf_prog *old,
struct bpf_prog *new)
{
- u8 *old_addr, *new_addr, *old_bypass_addr;
struct prog_poke_elem *elem;
struct bpf_array_aux *aux;
@@ -1025,7 +1030,7 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
list_for_each_entry(elem, &aux->poke_progs, list) {
struct bpf_jit_poke_descriptor *poke;
- int i, ret;
+ int i;
for (i = 0; i < elem->aux->size_poke_tab; i++) {
poke = &elem->aux->poke_tab[i];
@@ -1044,21 +1049,10 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
* activated, so tail call updates can arrive from here
* while JIT is still finishing its final fixup for
* non-activated poke entries.
- * 3) On program teardown, the program's kallsym entry gets
- * removed out of RCU callback, but we can only untrack
- * from sleepable context, therefore bpf_arch_text_poke()
- * might not see that this is in BPF text section and
- * bails out with -EINVAL. As these are unreachable since
- * RCU grace period already passed, we simply skip them.
- * 4) Also programs reaching refcount of zero while patching
+ * 3) Also programs reaching refcount of zero while patching
* is in progress is okay since we're protected under
* poke_mutex and untrack the programs before the JIT
- * buffer is freed. When we're still in the middle of
- * patching and suddenly kallsyms entry of the program
- * gets evicted, we just skip the rest which is fine due
- * to point 3).
- * 5) Any other error happening below from bpf_arch_text_poke()
- * is a unexpected bug.
+ * buffer is freed.
*/
if (!READ_ONCE(poke->tailcall_target_stable))
continue;
@@ -1068,39 +1062,7 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
poke->tail_call.key != key)
continue;
- old_bypass_addr = old ? NULL : poke->bypass_addr;
- old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
- new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
-
- if (new) {
- ret = bpf_arch_text_poke(poke->tailcall_target,
- BPF_MOD_JUMP,
- old_addr, new_addr);
- BUG_ON(ret < 0 && ret != -EINVAL);
- if (!old) {
- ret = bpf_arch_text_poke(poke->tailcall_bypass,
- BPF_MOD_JUMP,
- poke->bypass_addr,
- NULL);
- BUG_ON(ret < 0 && ret != -EINVAL);
- }
- } else {
- ret = bpf_arch_text_poke(poke->tailcall_bypass,
- BPF_MOD_JUMP,
- old_bypass_addr,
- poke->bypass_addr);
- BUG_ON(ret < 0 && ret != -EINVAL);
- /* let other CPUs finish the execution of program
- * so that it will not possible to expose them
- * to invalid nop, stack unwind, nop state
- */
- if (!ret)
- synchronize_rcu();
- ret = bpf_arch_text_poke(poke->tailcall_target,
- BPF_MOD_JUMP,
- old_addr, NULL);
- BUG_ON(ret < 0 && ret != -EINVAL);
- }
+ bpf_arch_poke_desc_update(poke, new, old);
}
}
}
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index cd3afe57ece3..fe254ae035fe 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -371,14 +371,18 @@ static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
s32 end_new, s32 curr, const bool probe_pass)
{
- const s32 off_min = S16_MIN, off_max = S16_MAX;
+ s64 off_min, off_max, off;
s32 delta = end_new - end_old;
- s32 off;
- if (insn->code == (BPF_JMP32 | BPF_JA))
+ if (insn->code == (BPF_JMP32 | BPF_JA)) {
off = insn->imm;
- else
+ off_min = S32_MIN;
+ off_max = S32_MAX;
+ } else {
off = insn->off;
+ off_min = S16_MIN;
+ off_max = S16_MAX;
+ }
if (curr < pos && curr + off + 1 >= end_old)
off += delta;
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index 63b909d277d4..6a51cfe4c2d6 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -978,6 +978,8 @@ void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
memcg = get_memcg(c);
old_memcg = set_active_memcg(memcg);
ret = __alloc(c, NUMA_NO_NODE, GFP_KERNEL | __GFP_NOWARN | __GFP_ACCOUNT);
+ if (ret)
+ *(struct bpf_mem_cache **)ret = c;
set_active_memcg(old_memcg);
mem_cgroup_put(memcg);
}
diff --git a/kernel/cgroup/legacy_freezer.c b/kernel/cgroup/legacy_freezer.c
index 122dacb3a443..66d1708042a7 100644
--- a/kernel/cgroup/legacy_freezer.c
+++ b/kernel/cgroup/legacy_freezer.c
@@ -66,9 +66,15 @@ static struct freezer *parent_freezer(struct freezer *freezer)
bool cgroup_freezing(struct task_struct *task)
{
bool ret;
+ unsigned int state;
rcu_read_lock();
- ret = task_freezer(task)->state & CGROUP_FREEZING;
+ /* Check if the cgroup is still FREEZING, but not FROZEN. The extra
+ * !FROZEN check is required, because the FREEZING bit is not cleared
+ * when the state FROZEN is reached.
+ */
+ state = task_freezer(task)->state;
+ ret = (state & CGROUP_FREEZING) && !(state & CGROUP_FROZEN);
rcu_read_unlock();
return ret;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b704d83a28b2..c9d123e13b57 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1814,31 +1814,34 @@ static inline void perf_event__state_init(struct perf_event *event)
PERF_EVENT_STATE_INACTIVE;
}
-static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
+static int __perf_event_read_size(u64 read_format, int nr_siblings)
{
int entry = sizeof(u64); /* value */
int size = 0;
int nr = 1;
- if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
size += sizeof(u64);
- if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
size += sizeof(u64);
- if (event->attr.read_format & PERF_FORMAT_ID)
+ if (read_format & PERF_FORMAT_ID)
entry += sizeof(u64);
- if (event->attr.read_format & PERF_FORMAT_LOST)
+ if (read_format & PERF_FORMAT_LOST)
entry += sizeof(u64);
- if (event->attr.read_format & PERF_FORMAT_GROUP) {
+ if (read_format & PERF_FORMAT_GROUP) {
nr += nr_siblings;
size += sizeof(u64);
}
- size += entry * nr;
- event->read_size = size;
+ /*
+ * Since perf_event_validate_size() limits this to 16k and inhibits
+ * adding more siblings, this will never overflow.
+ */
+ return size + nr * entry;
}
static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
@@ -1888,8 +1891,9 @@ static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
*/
static void perf_event__header_size(struct perf_event *event)
{
- __perf_event_read_size(event,
- event->group_leader->nr_siblings);
+ event->read_size =
+ __perf_event_read_size(event->attr.read_format,
+ event->group_leader->nr_siblings);
__perf_event_header_size(event, event->attr.sample_type);
}
@@ -1920,24 +1924,35 @@ static void perf_event__id_header_size(struct perf_event *event)
event->id_header_size = size;
}
+/*
+ * Check that adding an event to the group does not result in anybody
+ * overflowing the 64k event limit imposed by the output buffer.
+ *
+ * Specifically, check that the read_size for the event does not exceed 16k,
+ * read_size being the one term that grows with group size. Since read_size
+ * depends on per-event read_format, also (re)check the existing events.
+ *
+ * This leaves 48k for the constant size fields and things like callchains,
+ * branch stacks and register sets.
+ */
static bool perf_event_validate_size(struct perf_event *event)
{
- /*
- * The values computed here will be over-written when we actually
- * attach the event.
- */
- __perf_event_read_size(event, event->group_leader->nr_siblings + 1);
- __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
- perf_event__id_header_size(event);
+ struct perf_event *sibling, *group_leader = event->group_leader;
- /*
- * Sum the lot; should not exceed the 64k limit we have on records.
- * Conservative limit to allow for callchains and other variable fields.
- */
- if (event->read_size + event->header_size +
- event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
+ if (__perf_event_read_size(event->attr.read_format,
+ group_leader->nr_siblings + 1) > 16*1024)
return false;
+ if (__perf_event_read_size(group_leader->attr.read_format,
+ group_leader->nr_siblings + 1) > 16*1024)
+ return false;
+
+ for_each_sibling_event(sibling, group_leader) {
+ if (__perf_event_read_size(sibling->attr.read_format,
+ group_leader->nr_siblings + 1) > 16*1024)
+ return false;
+ }
+
return true;
}
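As a hedged worked example of the bound: with read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID, each entry is 16 bytes (value plus ID) and the group header adds 8, so read_size = 8 + 16 * (nr_siblings + 1); the 16k cut-off is therefore crossed once a group reaches roughly 1023 events, leaving the remaining 48k headroom described above.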
diff --git a/kernel/freezer.c b/kernel/freezer.c
index c450fa8b8b5e..759006a9a910 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -201,7 +201,7 @@ void __thaw_task(struct task_struct *p)
if (WARN_ON_ONCE(freezing(p)))
goto unlock;
- if (task_call_func(p, __restore_freezer_state, NULL))
+ if (!frozen(p) || task_call_func(p, __restore_freezer_state, NULL))
goto unlock;
wake_up_state(p, TASK_FROZEN);
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 075a632e6c7c..d5a0ee40bf66 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2252,7 +2252,7 @@ int register_kretprobe(struct kretprobe *rp)
rp->rph = NULL;
return -ENOMEM;
}
- rp->rph->rp = rp;
+ rcu_assign_pointer(rp->rph->rp, rp);
rp->nmissed = 0;
/* Establish function entry probe point */
ret = register_kprobe(&rp->kp);
@@ -2300,7 +2300,7 @@ void unregister_kretprobes(struct kretprobe **rps, int num)
#ifdef CONFIG_KRETPROBE_ON_RETHOOK
rethook_free(rps[i]->rh);
#else
- rps[i]->rph->rp = NULL;
+ rcu_assign_pointer(rps[i]->rph->rp, NULL);
#endif
}
mutex_unlock(&kprobe_mutex);
diff --git a/kernel/trace/rethook.c b/kernel/trace/rethook.c
index 6fd7d4ecbbc6..fa03094e9e69 100644
--- a/kernel/trace/rethook.c
+++ b/kernel/trace/rethook.c
@@ -48,7 +48,7 @@ static void rethook_free_rcu(struct rcu_head *head)
*/
void rethook_stop(struct rethook *rh)
{
- WRITE_ONCE(rh->handler, NULL);
+ rcu_assign_pointer(rh->handler, NULL);
}
/**
@@ -63,7 +63,7 @@ void rethook_stop(struct rethook *rh)
*/
void rethook_free(struct rethook *rh)
{
- WRITE_ONCE(rh->handler, NULL);
+ rethook_stop(rh);
call_rcu(&rh->rcu, rethook_free_rcu);
}
@@ -82,6 +82,12 @@ static int rethook_fini_pool(struct objpool_head *head, void *context)
return 0;
}
+static inline rethook_handler_t rethook_get_handler(struct rethook *rh)
+{
+ return (rethook_handler_t)rcu_dereference_check(rh->handler,
+ rcu_read_lock_any_held());
+}
+
/**
* rethook_alloc() - Allocate struct rethook.
* @data: a data to pass the @handler when hooking the return.
@@ -107,7 +113,7 @@ struct rethook *rethook_alloc(void *data, rethook_handler_t handler,
return ERR_PTR(-ENOMEM);
rh->data = data;
- rh->handler = handler;
+ rcu_assign_pointer(rh->handler, handler);
/* initialize the objpool for rethook nodes */
if (objpool_init(&rh->pool, num, size, GFP_KERNEL, rh,
@@ -135,9 +141,10 @@ static void free_rethook_node_rcu(struct rcu_head *head)
*/
void rethook_recycle(struct rethook_node *node)
{
- lockdep_assert_preemption_disabled();
+ rethook_handler_t handler;
- if (likely(READ_ONCE(node->rethook->handler)))
+ handler = rethook_get_handler(node->rethook);
+ if (likely(handler))
objpool_push(node, &node->rethook->pool);
else
call_rcu(&node->rcu, free_rethook_node_rcu);
@@ -153,9 +160,7 @@ NOKPROBE_SYMBOL(rethook_recycle);
*/
struct rethook_node *rethook_try_get(struct rethook *rh)
{
- rethook_handler_t handler = READ_ONCE(rh->handler);
-
- lockdep_assert_preemption_disabled();
+ rethook_handler_t handler = rethook_get_handler(rh);
/* Check whether @rh is going to be freed. */
if (unlikely(!handler))
@@ -300,7 +305,7 @@ unsigned long rethook_trampoline_handler(struct pt_regs *regs,
rhn = container_of(first, struct rethook_node, llist);
if (WARN_ON_ONCE(rhn->frame != frame))
break;
- handler = READ_ONCE(rhn->rethook->handler);
+ handler = rethook_get_handler(rhn->rethook);
if (handler)
handler(rhn, rhn->rethook->data,
correct_ret_addr, regs);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 43cc47d7faaf..8d2a4f00eca9 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -644,8 +644,8 @@ static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
*cnt = rb_time_cnt(top);
- /* If top and bottom counts don't match, this interrupted a write */
- if (*cnt != rb_time_cnt(bottom))
+ /* If top and msb counts don't match, this interrupted a write */
+ if (*cnt != rb_time_cnt(msb))
return false;
/* The shift to msb will lose its cnt bits */
@@ -3030,23 +3030,20 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
local_read(&bpage->write) & ~RB_WRITE_MASK;
unsigned long event_length = rb_event_length(event);
+ /*
+ * Force the before_stamp to be different than the write_stamp,
+ * so that the next event adds an absolute timestamp value and
+ * does not rely on the saved write stamp, which is now going
+ * to be bogus.
+ */
+ rb_time_set(&cpu_buffer->before_stamp, 0);
+
/* Something came in, can't discard */
if (!rb_time_cmpxchg(&cpu_buffer->write_stamp,
write_stamp, write_stamp - delta))
return false;
/*
- * It's possible that the event time delta is zero
- * (has the same time stamp as the previous event)
- * in which case write_stamp and before_stamp could
- * be the same. In such a case, force before_stamp
- * to be different than write_stamp. It doesn't
- * matter what it is, as long as its different.
- */
- if (!delta)
- rb_time_set(&cpu_buffer->before_stamp, 0);
-
- /*
* If an event were to come in now, it would see that the
* write_stamp and the before_stamp are different, and assume
* that this event just added itself before updating
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9aebf904ff97..fbcd3bafb93e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2360,13 +2360,7 @@ int is_tracing_stopped(void)
return global_trace.stop_count;
}
-/**
- * tracing_start - quick start of the tracer
- *
- * If tracing is enabled but was stopped by tracing_stop,
- * this will start the tracer back up.
- */
-void tracing_start(void)
+static void tracing_start_tr(struct trace_array *tr)
{
struct trace_buffer *buffer;
unsigned long flags;
@@ -2374,119 +2368,83 @@ void tracing_start(void)
if (tracing_disabled)
return;
- raw_spin_lock_irqsave(&global_trace.start_lock, flags);
- if (--global_trace.stop_count) {
- if (global_trace.stop_count < 0) {
+ raw_spin_lock_irqsave(&tr->start_lock, flags);
+ if (--tr->stop_count) {
+ if (WARN_ON_ONCE(tr->stop_count < 0)) {
/* Someone screwed up their debugging */
- WARN_ON_ONCE(1);
- global_trace.stop_count = 0;
+ tr->stop_count = 0;
}
goto out;
}
/* Prevent the buffers from switching */
- arch_spin_lock(&global_trace.max_lock);
+ arch_spin_lock(&tr->max_lock);
- buffer = global_trace.array_buffer.buffer;
+ buffer = tr->array_buffer.buffer;
if (buffer)
ring_buffer_record_enable(buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
- buffer = global_trace.max_buffer.buffer;
+ buffer = tr->max_buffer.buffer;
if (buffer)
ring_buffer_record_enable(buffer);
#endif
- arch_spin_unlock(&global_trace.max_lock);
-
- out:
- raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
-}
-
-static void tracing_start_tr(struct trace_array *tr)
-{
- struct trace_buffer *buffer;
- unsigned long flags;
-
- if (tracing_disabled)
- return;
-
- /* If global, we need to also start the max tracer */
- if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
- return tracing_start();
-
- raw_spin_lock_irqsave(&tr->start_lock, flags);
-
- if (--tr->stop_count) {
- if (tr->stop_count < 0) {
- /* Someone screwed up their debugging */
- WARN_ON_ONCE(1);
- tr->stop_count = 0;
- }
- goto out;
- }
-
- buffer = tr->array_buffer.buffer;
- if (buffer)
- ring_buffer_record_enable(buffer);
+ arch_spin_unlock(&tr->max_lock);
out:
raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
/**
- * tracing_stop - quick stop of the tracer
+ * tracing_start - quick start of the tracer
*
- * Light weight way to stop tracing. Use in conjunction with
- * tracing_start.
+ * If tracing is enabled but was stopped by tracing_stop,
+ * this will start the tracer back up.
*/
-void tracing_stop(void)
+void tracing_start(void)
+{
+ return tracing_start_tr(&global_trace);
+}
+
+static void tracing_stop_tr(struct trace_array *tr)
{
struct trace_buffer *buffer;
unsigned long flags;
- raw_spin_lock_irqsave(&global_trace.start_lock, flags);
- if (global_trace.stop_count++)
+ raw_spin_lock_irqsave(&tr->start_lock, flags);
+ if (tr->stop_count++)
goto out;
/* Prevent the buffers from switching */
- arch_spin_lock(&global_trace.max_lock);
+ arch_spin_lock(&tr->max_lock);
- buffer = global_trace.array_buffer.buffer;
+ buffer = tr->array_buffer.buffer;
if (buffer)
ring_buffer_record_disable(buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
- buffer = global_trace.max_buffer.buffer;
+ buffer = tr->max_buffer.buffer;
if (buffer)
ring_buffer_record_disable(buffer);
#endif
- arch_spin_unlock(&global_trace.max_lock);
+ arch_spin_unlock(&tr->max_lock);
out:
- raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
+ raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
-static void tracing_stop_tr(struct trace_array *tr)
+/**
+ * tracing_stop - quick stop of the tracer
+ *
+ * Light weight way to stop tracing. Use in conjunction with
+ * tracing_start.
+ */
+void tracing_stop(void)
{
- struct trace_buffer *buffer;
- unsigned long flags;
-
- /* If global, we need to also stop the max tracer */
- if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
- return tracing_stop();
-
- raw_spin_lock_irqsave(&tr->start_lock, flags);
- if (tr->stop_count++)
- goto out;
-
- buffer = tr->array_buffer.buffer;
- if (buffer)
- ring_buffer_record_disable(buffer);
-
- out:
- raw_spin_unlock_irqrestore(&tr->start_lock, flags);
+ return tracing_stop_tr(&global_trace);
}
static int trace_save_cmdline(struct task_struct *tsk)
@@ -2770,8 +2728,11 @@ void trace_buffered_event_enable(void)
for_each_tracing_cpu(cpu) {
page = alloc_pages_node(cpu_to_node(cpu),
GFP_KERNEL | __GFP_NORETRY, 0);
- if (!page)
- goto failed;
+ /* This is just an optimization and can handle failures */
+ if (!page) {
+ pr_err("Failed to allocate event buffer\n");
+ break;
+ }
event = page_address(page);
memset(event, 0, sizeof(*event));
@@ -2785,10 +2746,6 @@ void trace_buffered_event_enable(void)
WARN_ON_ONCE(1);
preempt_enable();
}
-
- return;
- failed:
- trace_buffered_event_disable();
}
static void enable_trace_buffered_event(void *data)
@@ -2823,11 +2780,9 @@ void trace_buffered_event_disable(void)
if (--trace_buffered_event_ref)
return;
- preempt_disable();
/* For each CPU, set the buffer as used. */
- smp_call_function_many(tracing_buffer_mask,
- disable_trace_buffered_event, NULL, 1);
- preempt_enable();
+ on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
+ NULL, true);
/* Wait for all current users to finish */
synchronize_rcu();
@@ -2836,17 +2791,19 @@ void trace_buffered_event_disable(void)
free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
per_cpu(trace_buffered_event, cpu) = NULL;
}
+
/*
- * Make sure trace_buffered_event is NULL before clearing
- * trace_buffered_event_cnt.
+ * Wait for all CPUs that may have started checking whether they can
+ * use their event buffer only after the previous synchronize_rcu()
+ * call, and that still read a valid pointer from trace_buffered_event.
+ * It must be ensured that they don't see the cleared
+ * trace_buffered_event_cnt, or they could wrongly decide to use the
+ * pointed-to buffer, which is now freed.
*/
- smp_wmb();
+ synchronize_rcu();
- preempt_disable();
- /* Do the work on each cpu */
- smp_call_function_many(tracing_buffer_mask,
- enable_trace_buffered_event, NULL, 1);
- preempt_enable();
+ /* For each CPU, relinquish the buffer */
+ on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
+ true);
}
static struct trace_buffer *temp_buffer;
@@ -6387,13 +6344,15 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
if (!tr->array_buffer.buffer)
return 0;
+ /* Do not allow tracing while resizing the ring buffer */
+ tracing_stop_tr(tr);
+
ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
if (ret < 0)
- return ret;
+ goto out_start;
#ifdef CONFIG_TRACER_MAX_TRACE
- if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
- !tr->current_trace->use_max_tr)
+ if (!tr->current_trace->use_max_tr)
goto out;
ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
@@ -6418,7 +6377,7 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
WARN_ON(1);
tracing_disabled = 1;
}
- return ret;
+ goto out_start;
}
update_buffer_entries(&tr->max_buffer, cpu);
@@ -6427,7 +6386,8 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
#endif /* CONFIG_TRACER_MAX_TRACE */
update_buffer_entries(&tr->array_buffer, cpu);
-
+ out_start:
+ tracing_start_tr(tr);
return ret;
}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 6e578f576a6f..2989b57e154a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1684,9 +1684,6 @@ static int wq_select_unbound_cpu(int cpu)
pr_warn_once("workqueue: round-robin CPU selection forced, expect performance impact\n");
}
- if (cpumask_empty(wq_unbound_cpumask))
- return cpu;
-
new_cpu = __this_cpu_read(wq_rr_cpu_last);
new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
if (unlikely(new_cpu >= nr_cpu_ids)) {
@@ -6515,6 +6512,17 @@ static inline void wq_watchdog_init(void) { }
#endif /* CONFIG_WQ_WATCHDOG */
+static void __init restrict_unbound_cpumask(const char *name, const struct cpumask *mask)
+{
+ if (!cpumask_intersects(wq_unbound_cpumask, mask)) {
+ pr_warn("workqueue: Restricting unbound_cpumask (%*pb) with %s (%*pb) leaves no CPU, ignoring\n",
+ cpumask_pr_args(wq_unbound_cpumask), name, cpumask_pr_args(mask));
+ return;
+ }
+
+ cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, mask);
+}
+
/**
* workqueue_init_early - early init for workqueue subsystem
*
@@ -6534,11 +6542,11 @@ void __init workqueue_init_early(void)
BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
- cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_WQ));
- cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_DOMAIN));
-
+ cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
+ restrict_unbound_cpumask("HK_TYPE_WQ", housekeeping_cpumask(HK_TYPE_WQ));
+ restrict_unbound_cpumask("HK_TYPE_DOMAIN", housekeeping_cpumask(HK_TYPE_DOMAIN));
if (!cpumask_empty(&wq_cmdline_cpumask))
- cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, &wq_cmdline_cpumask);
+ restrict_unbound_cpumask("workqueue.unbound_cpus", &wq_cmdline_cpumask);
pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
diff --git a/lib/closure.c b/lib/closure.c
index f86c9eeafb35..c16540552d61 100644
--- a/lib/closure.c
+++ b/lib/closure.c
@@ -36,7 +36,7 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
closure_debug_destroy(cl);
if (destructor)
- destructor(cl);
+ destructor(&cl->work);
if (parent)
closure_put(parent);
@@ -108,8 +108,9 @@ struct closure_syncer {
int done;
};
-static void closure_sync_fn(struct closure *cl)
+static CLOSURE_CALLBACK(closure_sync_fn)
{
+ struct closure *cl = container_of(ws, struct closure, work);
struct closure_syncer *s = cl->s;
struct task_struct *p;
diff --git a/lib/fw_table.c b/lib/fw_table.c
index b51f30a28e47..294df54e33b6 100644
--- a/lib/fw_table.c
+++ b/lib/fw_table.c
@@ -7,7 +7,7 @@
* Copyright (C) 2023 Intel Corp.
*/
#include <linux/errno.h>
-#include <linux/fw_table.h>
+#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/lib/group_cpus.c b/lib/group_cpus.c
index aa3f6815bb12..ee272c4cefcc 100644
--- a/lib/group_cpus.c
+++ b/lib/group_cpus.c
@@ -366,13 +366,25 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
if (!masks)
goto fail_node_to_cpumask;
- /* Stabilize the cpumasks */
- cpus_read_lock();
build_node_to_cpumask(node_to_cpumask);
+ /*
+ * Make a local cache of 'cpu_present_mask' so that the two-stage
+ * spread can observe a consistent 'cpu_present_mask' without holding
+ * the CPU hotplug lock, reducing the deadlock risk with the CPU
+ * hotplug code.
+ *
+ * CPU hotplug may happen while 'cpu_present_mask' is being read, and
+ * we can live with that: it only affects whether a hotplugged CPU is
+ * handled in the 1st or the 2nd stage, and either way is correct
+ * from the API user's viewpoint, since the two-stage spread is just
+ * an optimization.
+ */
+ cpumask_copy(npresmsk, data_race(cpu_present_mask));
+
/* grouping present CPUs first */
ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
- cpu_present_mask, nmsk, masks);
+ npresmsk, nmsk, masks);
if (ret < 0)
goto fail_build_affinity;
nr_present = ret;
@@ -387,15 +399,13 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
curgrp = 0;
else
curgrp = nr_present;
- cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
+ cpumask_andnot(npresmsk, cpu_possible_mask, npresmsk);
ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
npresmsk, nmsk, masks);
if (ret >= 0)
nr_others = ret;
fail_build_affinity:
- cpus_read_unlock();
-
if (ret >= 0)
WARN_ON(nr_present + nr_others < numgrps);
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index 99d2a3a528e1..de2113a58fa0 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -562,7 +562,7 @@ static void kunit_log_test(struct kunit *test)
KUNIT_EXPECT_TRUE(test, test->log->append_newlines);
full_log = string_stream_get_string(test->log);
- kunit_add_action(test, (kunit_action_t *)kfree, full_log);
+ kunit_add_action(test, kfree_wrapper, full_log);
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
strstr(full_log, "put this in log."));
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index f2eb71f1a66c..7aceb07a1af9 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -338,6 +338,36 @@ void kunit_init_test(struct kunit *test, const char *name, struct string_stream
}
EXPORT_SYMBOL_GPL(kunit_init_test);
+/* Only warn when a test takes more than twice the threshold */
+#define KUNIT_SPEED_WARNING_MULTIPLIER 2
+
+/* Slow tests are defined as taking more than 1s */
+#define KUNIT_SPEED_SLOW_THRESHOLD_S 1
+
+#define KUNIT_SPEED_SLOW_WARNING_THRESHOLD_S \
+ (KUNIT_SPEED_WARNING_MULTIPLIER * KUNIT_SPEED_SLOW_THRESHOLD_S)
+
+#define s_to_timespec64(s) ns_to_timespec64((s) * NSEC_PER_SEC)
+
+static void kunit_run_case_check_speed(struct kunit *test,
+ struct kunit_case *test_case,
+ struct timespec64 duration)
+{
+ struct timespec64 slow_thr =
+ s_to_timespec64(KUNIT_SPEED_SLOW_WARNING_THRESHOLD_S);
+ enum kunit_speed speed = test_case->attr.speed;
+
+ if (timespec64_compare(&duration, &slow_thr) < 0)
+ return;
+
+ if (speed == KUNIT_SPEED_VERY_SLOW || speed == KUNIT_SPEED_SLOW)
+ return;
+
+ kunit_warn(test,
+ "Test should be marked slow (runtime: %lld.%09lds)",
+ duration.tv_sec, duration.tv_nsec);
+}
+
/*
* Initializes and runs test case. Does not clean up or do post validations.
*/
@@ -345,6 +375,8 @@ static void kunit_run_case_internal(struct kunit *test,
struct kunit_suite *suite,
struct kunit_case *test_case)
{
+ struct timespec64 start, end;
+
if (suite->init) {
int ret;
@@ -356,7 +388,13 @@ static void kunit_run_case_internal(struct kunit *test,
}
}
+ ktime_get_ts64(&start);
+
test_case->run_case(test);
+
+ ktime_get_ts64(&end);
+
+ kunit_run_case_check_speed(test, test_case, timespec64_sub(end, start));
}
static void kunit_case_internal_cleanup(struct kunit *test)
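To keep the new runtime check quiet for a legitimately long test, the case can presumably be marked slow via its attributes, e.g.:

/* Sketch only: KUNIT_CASE_SLOW() sets .attr.speed = KUNIT_SPEED_SLOW,
 * which the check above exempts from the warning; the case names here
 * are illustrative.
 */
static struct kunit_case example_test_cases[] = {
	KUNIT_CASE(example_fast_test),
	KUNIT_CASE_SLOW(example_long_running_test),
	{}
};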
@@ -670,6 +708,8 @@ int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_
return 0;
}
+ kunit_suite_counter = 1;
+
static_branch_inc(&kunit_running);
for (i = 0; i < num_suites; i++) {
@@ -696,8 +736,6 @@ void __kunit_test_suites_exit(struct kunit_suite **suites, int num_suites)
for (i = 0; i < num_suites; i++)
kunit_exit_suite(suites[i]);
-
- kunit_suite_counter = 1;
}
EXPORT_SYMBOL_GPL(__kunit_test_suites_exit);
diff --git a/lib/objpool.c b/lib/objpool.c
index ce0087f64400..cfdc02420884 100644
--- a/lib/objpool.c
+++ b/lib/objpool.c
@@ -201,6 +201,23 @@ static inline void *objpool_try_get_slot(struct objpool_head *pool, int cpu)
while (head != READ_ONCE(slot->last)) {
void *obj;
+ /*
+ * the reads of 'last' and 'head' could be observed out of
+ * order, since they are updated independently in push() and
+ * pop()
+ *
+ * before any retrieval attempt, pop() must guarantee that
+ * 'head' is behind 'last', i.e. that there are available
+ * objects in the slot; this is ensured by the condition
+ * 'last != head && last - head <= nr_objs', which is
+ * equivalent to 'last - head - 1 < nr_objs' since 'last'
+ * and 'head' are both unsigned 32-bit values
+ */
+ if (READ_ONCE(slot->last) - head - 1 >= pool->nr_objs) {
+ head = READ_ONCE(slot->head);
+ continue;
+ }
+
/* obj must be retrieved before moving forward head */
obj = READ_ONCE(slot->entries[head & slot->mask]);
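The guard added to objpool_try_get_slot() relies on unsigned wraparound: for 32-bit cursors, 'last - head - 1 < nr_objs' holds exactly when the slot has retrievable objects, even after the counters wrap. A standalone demonstration of that equivalence (plain C, not kernel code):

#include <assert.h>
#include <stdint.h>

static int slot_has_objs(uint32_t head, uint32_t last, uint32_t nr_objs)
{
	return last - head - 1 < nr_objs; /* wraps safely in uint32 */
}

int main(void)
{
	assert(!slot_has_objs(5, 5, 4));           /* empty: last == head */
	assert(slot_has_objs(5, 7, 4));            /* two objects queued */
	assert(slot_has_objs(0xfffffffeu, 1, 4));  /* still valid across wrap */
	assert(!slot_has_objs(7, 5, 4));           /* stale/out-of-order view */
	return 0;
}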
diff --git a/mm/Kconfig b/mm/Kconfig
index 89971a894b60..57cd378c73d6 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1201,13 +1201,6 @@ config ANON_VMA_NAME
area from being merged with adjacent virtual memory areas due to the
difference in their name.
-config USERFAULTFD
- bool "Enable userfaultfd() system call"
- depends on MMU
- help
- Enable the userfaultfd() system call that allows to intercept and
- handle page faults in userland.
-
config HAVE_ARCH_USERFAULTFD_WP
bool
help
@@ -1218,6 +1211,14 @@ config HAVE_ARCH_USERFAULTFD_MINOR
help
Arch has userfaultfd minor fault support
+menuconfig USERFAULTFD
+ bool "Enable userfaultfd() system call"
+ depends on MMU
+ help
+ Enable the userfaultfd() system call that allows userspace to
+ intercept and handle page faults.
+
+if USERFAULTFD
config PTE_MARKER_UFFD_WP
bool "Userfaultfd write protection support for shmem/hugetlbfs"
default y
@@ -1227,6 +1228,7 @@ config PTE_MARKER_UFFD_WP
Allows to create marker PTEs for userfaultfd write protection
purposes. It is required to enable userfaultfd write protection on
file-backed memory types like shmem and hugetlbfs.
+endif # USERFAULTFD
# multi-gen LRU {
config LRU_GEN
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 6262d55904e7..ce1562783e7e 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -1225,6 +1225,7 @@ static void damon_split_region_at(struct damon_target *t,
new->age = r->age;
new->last_nr_accesses = r->last_nr_accesses;
new->nr_accesses_bp = r->nr_accesses_bp;
+ new->nr_accesses = r->nr_accesses;
damon_insert_region(new, r, damon_next_region(r), t);
}
diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c
index be667236b8e6..fe0fe2562000 100644
--- a/mm/damon/sysfs-schemes.c
+++ b/mm/damon/sysfs-schemes.c
@@ -139,6 +139,13 @@ static const struct kobj_type damon_sysfs_scheme_region_ktype = {
* damon_sysfs_before_damos_apply() understands the situation by showing the
* 'finished' status and does nothing.
*
+ * If DAMOS is not applied to any region for any reason, including the
+ * access pattern, the watermarks, the quotas, and the filters,
+ * ->before_damos_apply() will not be called back. Until the situation
+ * changes, the update will never finish. To avoid this,
+ * damon_sysfs_after_sampling() sets the status to 'finished' if more than
+ * two apply intervals of the scheme have passed while the state is 'idle'.
+ *
* Finally, the tried regions request handling finisher function
* (damon_sysfs_schemes_update_regions_stop()) unregisters the callbacks.
*/
@@ -154,6 +161,7 @@ struct damon_sysfs_scheme_regions {
int nr_regions;
unsigned long total_bytes;
enum damos_sysfs_regions_upd_status upd_status;
+ unsigned long upd_timeout_jiffies;
};
static struct damon_sysfs_scheme_regions *
@@ -1854,7 +1862,9 @@ static int damon_sysfs_after_sampling(struct damon_ctx *ctx)
for (i = 0; i < sysfs_schemes->nr; i++) {
sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions;
if (sysfs_regions->upd_status ==
- DAMOS_TRIED_REGIONS_UPD_STARTED)
+ DAMOS_TRIED_REGIONS_UPD_STARTED ||
+ time_after(jiffies,
+ sysfs_regions->upd_timeout_jiffies))
sysfs_regions->upd_status =
DAMOS_TRIED_REGIONS_UPD_FINISHED;
}
@@ -1885,14 +1895,41 @@ int damon_sysfs_schemes_clear_regions(
return 0;
}
+static struct damos *damos_sysfs_nth_scheme(int n, struct damon_ctx *ctx)
+{
+ struct damos *scheme;
+ int i = 0;
+
+ damon_for_each_scheme(scheme, ctx) {
+ if (i == n)
+ return scheme;
+ i++;
+ }
+ return NULL;
+}
+
static void damos_tried_regions_init_upd_status(
- struct damon_sysfs_schemes *sysfs_schemes)
+ struct damon_sysfs_schemes *sysfs_schemes,
+ struct damon_ctx *ctx)
{
int i;
+ struct damos *scheme;
+ struct damon_sysfs_scheme_regions *sysfs_regions;
- for (i = 0; i < sysfs_schemes->nr; i++)
- sysfs_schemes->schemes_arr[i]->tried_regions->upd_status =
- DAMOS_TRIED_REGIONS_UPD_IDLE;
+ for (i = 0; i < sysfs_schemes->nr; i++) {
+ sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions;
+ scheme = damos_sysfs_nth_scheme(i, ctx);
+ if (!scheme) {
+ sysfs_regions->upd_status =
+ DAMOS_TRIED_REGIONS_UPD_FINISHED;
+ continue;
+ }
+ sysfs_regions->upd_status = DAMOS_TRIED_REGIONS_UPD_IDLE;
+ sysfs_regions->upd_timeout_jiffies = jiffies +
+ 2 * usecs_to_jiffies(scheme->apply_interval_us ?
+ scheme->apply_interval_us :
+ ctx->attrs.sample_interval);
+ }
}
/* Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock */
@@ -1902,7 +1939,7 @@ int damon_sysfs_schemes_update_regions_start(
{
damon_sysfs_schemes_clear_regions(sysfs_schemes, ctx);
damon_sysfs_schemes_for_damos_callback = sysfs_schemes;
- damos_tried_regions_init_upd_status(sysfs_schemes);
+ damos_tried_regions_init_upd_status(sysfs_schemes, ctx);
damos_regions_upd_total_bytes_only = total_bytes_only;
ctx->callback.before_damos_apply = damon_sysfs_before_damos_apply;
ctx->callback.after_sampling = damon_sysfs_after_sampling;
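The timeout initialized above is two apply intervals from now, falling back to the sampling interval for schemes with apply_interval_us == 0 (which apply at every sample). A small sketch of that computation, with microseconds standing in for jiffies; upd_deadline_us() is an illustrative name:

#include <stdint.h>
#include <stdio.h>

static uint64_t upd_deadline_us(uint64_t now_us, uint64_t apply_interval_us,
				uint64_t sample_interval_us)
{
	uint64_t interval = apply_interval_us ? apply_interval_us
					      : sample_interval_us;

	return now_us + 2 * interval;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)upd_deadline_us(1000, 0, 100));
	printf("%llu\n", (unsigned long long)upd_deadline_us(1000, 500, 100));
	return 0;
}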
diff --git a/mm/filemap.c b/mm/filemap.c
index 32eedf3afd45..f1c8c278310f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3371,7 +3371,7 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
}
}
- if (pmd_none(*vmf->pmd))
+ if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
return false;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 1169ef2f2176..6feb3e0630d1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1182,6 +1182,13 @@ static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
return (get_vma_private_data(vma) & flag) != 0;
}
+bool __vma_private_lock(struct vm_area_struct *vma)
+{
+ return !(vma->vm_flags & VM_MAYSHARE) &&
+ get_vma_private_data(vma) & ~HPAGE_RESV_MASK &&
+ is_vma_resv_set(vma, HPAGE_RESV_OWNER);
+}
+
void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 1eacca03bedd..5501363d6b31 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -642,32 +642,16 @@ static struct kmemleak_object *__alloc_object(gfp_t gfp)
if (!object) {
pr_warn("Cannot allocate a kmemleak_object structure\n");
kmemleak_disable();
+ return NULL;
}
- return object;
-}
-
-static int __link_object(struct kmemleak_object *object, unsigned long ptr,
- size_t size, int min_count, bool is_phys)
-{
-
- struct kmemleak_object *parent;
- struct rb_node **link, *rb_parent;
- unsigned long untagged_ptr;
- unsigned long untagged_objp;
-
INIT_LIST_HEAD(&object->object_list);
INIT_LIST_HEAD(&object->gray_list);
INIT_HLIST_HEAD(&object->area_list);
raw_spin_lock_init(&object->lock);
atomic_set(&object->use_count, 1);
- object->flags = OBJECT_ALLOCATED | (is_phys ? OBJECT_PHYS : 0);
- object->pointer = ptr;
- object->size = kfence_ksize((void *)ptr) ?: size;
object->excess_ref = 0;
- object->min_count = min_count;
object->count = 0; /* white color initially */
- object->jiffies = jiffies;
object->checksum = 0;
object->del_state = 0;
@@ -692,6 +676,24 @@ static int __link_object(struct kmemleak_object *object, unsigned long ptr,
/* kernel backtrace */
object->trace_handle = set_track_prepare();
+ return object;
+}
+
+static int __link_object(struct kmemleak_object *object, unsigned long ptr,
+ size_t size, int min_count, bool is_phys)
+{
+
+ struct kmemleak_object *parent;
+ struct rb_node **link, *rb_parent;
+ unsigned long untagged_ptr;
+ unsigned long untagged_objp;
+
+ object->flags = OBJECT_ALLOCATED | (is_phys ? OBJECT_PHYS : 0);
+ object->pointer = ptr;
+ object->size = kfence_ksize((void *)ptr) ?: size;
+ object->min_count = min_count;
+ object->jiffies = jiffies;
+
untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
/*
* Only update min_addr and max_addr with object
@@ -1150,6 +1152,7 @@ EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
void __ref kmemleak_update_trace(const void *ptr)
{
struct kmemleak_object *object;
+ depot_stack_handle_t trace_handle;
unsigned long flags;
pr_debug("%s(0x%px)\n", __func__, ptr);
@@ -1166,8 +1169,9 @@ void __ref kmemleak_update_trace(const void *ptr)
return;
}
+ trace_handle = set_track_prepare();
raw_spin_lock_irqsave(&object->lock, flags);
- object->trace_handle = set_track_prepare();
+ object->trace_handle = trace_handle;
raw_spin_unlock_irqrestore(&object->lock, flags);
put_object(object);
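The kmemleak_update_trace() change moves set_track_prepare() (which may allocate) out from under the raw spinlock, leaving only a plain assignment inside the critical section. A userspace sketch of that prepare-outside, publish-inside pattern; the pthread spinlock and prepare_trace() are stand-ins, not kernel API:

#include <pthread.h>
#include <stdlib.h>

struct object {
	pthread_spinlock_t lock;
	void *trace_handle;
};

static void *prepare_trace(void)
{
	return malloc(64); /* stand-in for a possibly-sleeping allocation */
}

static void update_trace(struct object *obj)
{
	void *handle = prepare_trace();   /* expensive work outside the lock */

	pthread_spin_lock(&obj->lock);
	obj->trace_handle = handle;       /* only a cheap assignment inside */
	pthread_spin_unlock(&obj->lock);
}

int main(void)
{
	struct object obj;

	pthread_spin_init(&obj.lock, PTHREAD_PROCESS_PRIVATE);
	update_trace(&obj);
	free(obj.trace_handle);
	return 0;
}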
diff --git a/mm/madvise.c b/mm/madvise.c
index cf4d694280e9..6214a1ab5654 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -335,6 +335,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
struct folio *folio = NULL;
LIST_HEAD(folio_list);
bool pageout_anon_only_filter;
+ unsigned int batch_count = 0;
if (fatal_signal_pending(current))
return -EINTR;
@@ -416,6 +417,7 @@ huge_unlock:
regular_folio:
#endif
tlb_change_page_size(tlb, PAGE_SIZE);
+restart:
start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
if (!start_pte)
return 0;
@@ -424,6 +426,15 @@ regular_folio:
for (; addr < end; pte++, addr += PAGE_SIZE) {
ptent = ptep_get(pte);
+ if (++batch_count == SWAP_CLUSTER_MAX) {
+ batch_count = 0;
+ if (need_resched()) {
+ pte_unmap_unlock(start_pte, ptl);
+ cond_resched();
+ goto restart;
+ }
+ }
+
if (pte_none(ptent))
continue;
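The madvise hunk above bounds latency by releasing the PTE lock and rescheduling every SWAP_CLUSTER_MAX entries, then restarting the walk. A minimal pthread analog of that batch-and-yield loop; BATCH and process_range() are illustrative:

#include <pthread.h>
#include <sched.h>

#define BATCH 32

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void process_range(int *items, int n)
{
	int batch_count = 0;
	int i = 0;

restart:
	pthread_mutex_lock(&lock);
	for (; i < n; i++) {
		if (++batch_count == BATCH) {
			batch_count = 0;
			pthread_mutex_unlock(&lock);
			sched_yield();      /* cond_resched() analog */
			goto restart;       /* retake lock, resume at i */
		}
		items[i] *= 2;              /* the per-entry work */
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	int items[100] = { 0 };

	process_range(items, 100);
	return 0;
}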
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1c1061df9cd1..b226090fd906 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3166,6 +3166,7 @@ __always_inline struct obj_cgroup *current_obj_cgroup(void)
return NULL;
from_memcg:
+ objcg = NULL;
for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
/*
* Memcg pointer is protected by scope (see set_active_memcg())
@@ -3176,7 +3177,6 @@ from_memcg:
objcg = rcu_dereference_check(memcg->objcg, 1);
if (likely(objcg))
break;
- objcg = NULL;
}
return objcg;
diff --git a/mm/memory.c b/mm/memory.c
index 1f18ed4a5497..5c757fba8858 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1517,6 +1517,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
continue;
} else {
/* We should have covered all the swap entry types */
+ pr_alert("unrecognized swap entry 0x%lx\n", entry.val);
WARN_ON_ONCE(1);
}
pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index ab41a511e20a..7a5fc89a8652 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1129,6 +1129,9 @@ void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages)
kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
}
+/*
+ * Must be called with mem_hotplug_lock in write mode.
+ */
int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
struct zone *zone, struct memory_group *group)
{
@@ -1149,7 +1152,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
!IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
return -EINVAL;
- mem_hotplug_begin();
/* associate pfn range with the zone */
move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);
@@ -1208,7 +1210,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
writeback_set_ratelimit();
memory_notify(MEM_ONLINE, &arg);
- mem_hotplug_done();
return 0;
failed_addition:
@@ -1217,7 +1218,6 @@ failed_addition:
(((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
memory_notify(MEM_CANCEL_ONLINE, &arg);
remove_pfn_range_from_zone(zone, pfn, nr_pages);
- mem_hotplug_done();
return ret;
}
@@ -1458,7 +1458,7 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
/* create memory block devices after memory was added */
ret = create_memory_block_devices(start, size, params.altmap, group);
if (ret) {
- arch_remove_memory(start, size, NULL);
+ arch_remove_memory(start, size, params.altmap);
goto error_free;
}
@@ -1863,6 +1863,9 @@ static int count_system_ram_pages_cb(unsigned long start_pfn,
return 0;
}
+/*
+ * Must be called with mem_hotplug_lock in write mode.
+ */
int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
struct zone *zone, struct memory_group *group)
{
@@ -1885,8 +1888,6 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
!IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION)))
return -EINVAL;
- mem_hotplug_begin();
-
/*
* Don't allow to offline memory blocks that contain holes.
* Consequently, memory blocks with holes can never get onlined
@@ -2031,7 +2032,6 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
memory_notify(MEM_OFFLINE, &arg);
remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
- mem_hotplug_done();
return 0;
failed_removal_isolated:
@@ -2046,7 +2046,6 @@ failed_removal:
(unsigned long long) start_pfn << PAGE_SHIFT,
((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
reason);
- mem_hotplug_done();
return ret;
}
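online_pages()/offline_pages() above stop taking mem_hotplug_lock themselves; as the added comments state, callers must now hold it in write mode. A sketch of that caller-holds-lock convention, with a pthread rwlock standing in for the kernel's percpu_rw_semaphore; the trylock assertion only approximates what lockdep would verify:

#include <assert.h>
#include <pthread.h>

static pthread_rwlock_t hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Must be called with hotplug_lock held in write mode. */
static int online_pages(unsigned long pfn, unsigned long nr_pages)
{
	/* approximate lockdep: trywrlock must fail while the lock is held */
	assert(pthread_rwlock_trywrlock(&hotplug_lock) != 0);
	(void)pfn;
	(void)nr_pages;
	/* ... move the range to a zone, update counters, notify ... */
	return 0;
}

int main(void)
{
	pthread_rwlock_wrlock(&hotplug_lock);   /* mem_hotplug_begin() */
	online_pages(0, 128);
	pthread_rwlock_unlock(&hotplug_lock);   /* mem_hotplug_done() */
	return 0;
}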
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index aff31cd944c2..b240d9aae4a6 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -183,7 +183,7 @@ out:
}
static const struct genl_multicast_group dropmon_mcgrps[] = {
- { .name = "events", },
+ { .name = "events", .cap_sys_admin = 1 },
};
static void send_dm_alert(struct work_struct *work)
@@ -1619,11 +1619,13 @@ static const struct genl_small_ops dropmon_ops[] = {
.cmd = NET_DM_CMD_START,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = net_dm_cmd_trace,
+ .flags = GENL_ADMIN_PERM,
},
{
.cmd = NET_DM_CMD_STOP,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = net_dm_cmd_trace,
+ .flags = GENL_ADMIN_PERM,
},
{
.cmd = NET_DM_CMD_CONFIG_GET,
diff --git a/net/core/filter.c b/net/core/filter.c
index 7e4d7c3bcc84..1737884be52f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2602,6 +2602,22 @@ BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes)
return 0;
}
+static void sk_msg_reset_curr(struct sk_msg *msg)
+{
+ u32 i = msg->sg.start;
+ u32 len = 0;
+
+ do {
+ len += sk_msg_elem(msg, i)->length;
+ sk_msg_iter_var_next(i);
+ if (len >= msg->sg.size)
+ break;
+ } while (i != msg->sg.end);
+
+ msg->sg.curr = i;
+ msg->sg.copybreak = 0;
+}
+
static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
.func = bpf_msg_cork_bytes,
.gpl_only = false,
@@ -2721,6 +2737,7 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
msg->sg.end - shift + NR_MSG_FRAG_IDS :
msg->sg.end - shift;
out:
+ sk_msg_reset_curr(msg);
msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset;
msg->data_end = msg->data + bytes;
return 0;
@@ -2857,6 +2874,7 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
msg->sg.data[new] = rsge;
}
+ sk_msg_reset_curr(msg);
sk_msg_compute_data_pointers(msg);
return 0;
}
@@ -3025,6 +3043,7 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
sk_mem_uncharge(msg->sk, len - pop);
msg->sg.size -= (len - pop);
+ sk_msg_reset_curr(msg);
sk_msg_compute_data_pointers(msg);
return 0;
}
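sk_msg_reset_curr(), added above, re-derives the parse cursor after pull/push/pop have restructured the element ring: walk from 'start' accumulating lengths until the message size is covered, and park the cursor there so later copies never start from a stale element. A self-contained rendering of that walk over a plain array ring; struct msg is illustrative:

#include <stdio.h>

#define NR_ELEMS 8

struct msg {
	unsigned int lens[NR_ELEMS];
	unsigned int start, end;   /* ring indices */
	unsigned int size;         /* total bytes */
	unsigned int curr;         /* parse cursor */
};

static void msg_reset_curr(struct msg *m)
{
	unsigned int i = m->start, len = 0;

	do {
		len += m->lens[i];
		i = (i + 1) % NR_ELEMS;
		if (len >= m->size)
			break;
	} while (i != m->end);

	m->curr = i;
}

int main(void)
{
	struct msg m = { .lens = { 100, 200, 50 }, .start = 0, .end = 3,
			 .size = 350, .curr = 0 };

	msg_reset_curr(&m);
	printf("curr=%u\n", m.curr); /* 3: one past the last covered element */
	return 0;
}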
diff --git a/net/core/scm.c b/net/core/scm.c
index 880027ecf516..7dc47c17d863 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -26,6 +26,7 @@
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/errqueue.h>
+#include <linux/io_uring.h>
#include <linux/uaccess.h>
@@ -103,6 +104,11 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
if (fd < 0 || !(file = fget_raw(fd)))
return -EBADF;
+ /* don't allow io_uring files */
+ if (io_uring_get_socket(file)) {
+ fput(file);
+ return -EINVAL;
+ }
*fpp++ = file;
fpl->count++;
}
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 6c31eefbd777..93ecfceac1bc 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -826,6 +826,8 @@ static void sk_psock_destroy(struct work_struct *work)
if (psock->sk_redir)
sock_put(psock->sk_redir);
+ if (psock->sk_pair)
+ sock_put(psock->sk_pair);
sock_put(psock->sk);
kfree(psock);
}
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index 3bbd5afb7b31..fe3553f60bf3 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -505,6 +505,7 @@ static int ethnl_default_dumpit(struct sk_buff *skb,
ret = skb->len;
break;
}
+ ret = 0;
}
rtnl_unlock();
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 76c3ea75b8dd..efeeca2b1328 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -216,8 +216,10 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
int tv = get_random_u32_below(max_delay);
im->tm_running = 1;
- if (!mod_timer(&im->timer, jiffies+tv+2))
- refcount_inc(&im->refcnt);
+ if (refcount_inc_not_zero(&im->refcnt)) {
+ if (mod_timer(&im->timer, jiffies + tv + 2))
+ ip_ma_put(im);
+ }
}
static void igmp_gq_start_timer(struct in_device *in_dev)
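The igmp_start_timer() fix takes a reference before arming the timer, and drops the extra reference when mod_timer() reports the timer was already pending, so the timer can never outlive its object. A userspace sketch that mimics the refcount_inc_not_zero()/mod_timer() semantics with C11 atomics and stubs:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct mc_list {
	atomic_int refcnt;
	bool timer_pending;
};

/* refcount_inc_not_zero() lookalike: never resurrect a dying object */
static bool ref_get_not_zero(struct mc_list *im)
{
	int old = atomic_load(&im->refcnt);

	while (old != 0)
		if (atomic_compare_exchange_weak(&im->refcnt, &old, old + 1))
			return true;
	return false;
}

static void ref_put(struct mc_list *im)
{
	if (atomic_fetch_sub(&im->refcnt, 1) == 1)
		free(im);
}

/* mod_timer() lookalike: returns true if the timer was already pending */
static bool arm_timer(struct mc_list *im)
{
	bool was_pending = im->timer_pending;

	im->timer_pending = true;
	return was_pending;
}

static void start_timer(struct mc_list *im)
{
	if (ref_get_not_zero(im)) {   /* pin the object first ... */
		if (arm_timer(im))    /* ... then arm; if it was already */
			ref_put(im);  /* pending, drop the extra reference */
	}
}

int main(void)
{
	struct mc_list *im = calloc(1, sizeof(*im));

	atomic_store(&im->refcnt, 1);
	start_timer(im);   /* timer now holds one reference */
	start_timer(im);   /* re-arm: the extra reference is dropped */
	ref_put(im);       /* drop ours; the timer's reference remains */
	return 0;
}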
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 22a26d1d29a0..5169c3c72cff 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -635,15 +635,18 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
}
if (dev->header_ops) {
+ int pull_len = tunnel->hlen + sizeof(struct iphdr);
+
if (skb_cow_head(skb, 0))
goto free_skb;
tnl_params = (const struct iphdr *)skb->data;
- /* Pull skb since ip_tunnel_xmit() needs skb->data pointing
- * to gre header.
- */
- skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
+ if (!pskb_network_may_pull(skb, pull_len))
+ goto free_skb;
+
+ /* ip_tunnel_xmit() needs skb->data pointing to gre header. */
+ skb_pull(skb, pull_len);
skb_reset_mac_header(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL &&
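The ipgre_xmit() change validates that pull_len bytes are really present (pskb_network_may_pull()) before advancing skb->data, so a short or malformed packet is dropped instead of being parsed past its end. The check-before-advance pattern in miniature, over a plain buffer cursor; struct buf, may_pull() and pull() are illustrative:

#include <stddef.h>
#include <stdio.h>

struct buf {
	const unsigned char *data;
	size_t len;
};

static int may_pull(const struct buf *b, size_t pull_len)
{
	return b->len >= pull_len;
}

static int pull(struct buf *b, size_t pull_len)
{
	if (!may_pull(b, pull_len))
		return -1;         /* caller frees the packet */
	b->data += pull_len;
	b->len -= pull_len;
	return 0;
}

int main(void)
{
	unsigned char pkt[40] = { 0 };
	struct buf b = { pkt, sizeof(pkt) };

	printf("%d\n", pull(&b, 24));  /* 0: header fits */
	printf("%d\n", pull(&b, 64));  /* -1: would run past the end */
	return 0;
}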
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 53bcc17c91e4..ff6838ca2e58 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3368,9 +3368,25 @@ int tcp_set_window_clamp(struct sock *sk, int val)
return -EINVAL;
tp->window_clamp = 0;
} else {
- tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
- SOCK_MIN_RCVBUF / 2 : val;
- tp->rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp);
+ u32 new_rcv_ssthresh, old_window_clamp = tp->window_clamp;
+ u32 new_window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
+ SOCK_MIN_RCVBUF / 2 : val;
+
+ if (new_window_clamp == old_window_clamp)
+ return 0;
+
+ tp->window_clamp = new_window_clamp;
+ if (new_window_clamp < old_window_clamp) {
+ /* need to apply the reserved mem provisioning only
+ * when shrinking the window clamp
+ */
+ __tcp_adjust_rcv_ssthresh(sk, tp->window_clamp);
+
+ } else {
+ new_rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp);
+ tp->rcv_ssthresh = max(new_rcv_ssthresh,
+ tp->rcv_ssthresh);
+ }
}
return 0;
}
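The tcp_set_window_clamp() rewrite above applies the threshold reduction only when the clamp shrinks, and lets rcv_ssthresh only ratchet upward when it grows. A simplified userspace rendering; note that the kernel's __tcp_adjust_rcv_ssthresh() additionally accounts for reserved receive memory, which this sketch omits:

#include <stdio.h>

struct tcp_state {
	unsigned int window_clamp, rcv_ssthresh, rcv_wnd;
};

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

static void set_window_clamp(struct tcp_state *tp, unsigned int val)
{
	unsigned int old = tp->window_clamp;

	if (val == old)
		return;

	tp->window_clamp = val;
	if (val < old)
		/* shrink: pull the threshold down to the new clamp */
		tp->rcv_ssthresh = min_u(tp->rcv_ssthresh, val);
	else
		/* grow: the threshold may only move upward */
		tp->rcv_ssthresh = max_u(tp->rcv_ssthresh,
					 min_u(tp->rcv_wnd, val));
}

int main(void)
{
	struct tcp_state tp = { .window_clamp = 65535, .rcv_ssthresh = 65535,
				.rcv_wnd = 32768 };

	set_window_clamp(&tp, 16384);
	printf("%u\n", tp.rcv_ssthresh);  /* 16384 */
	set_window_clamp(&tp, 65535);
	printf("%u\n", tp.rcv_ssthresh);  /* 32768 */
	return 0;
}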
@@ -3594,6 +3610,10 @@ int do_tcp_setsockopt(struct sock *sk, int level, int optname,
break;
case TCP_AO_REPAIR:
+ if (!tcp_can_repair_sock(sk)) {
+ err = -EPERM;
+ break;
+ }
err = tcp_ao_set_repair(sk, optval, optlen);
break;
#ifdef CONFIG_TCP_AO
@@ -4293,6 +4313,8 @@ zerocopy_rcv_out:
}
#endif
case TCP_AO_REPAIR:
+ if (!tcp_can_repair_sock(sk))
+ return -EPERM;
return tcp_ao_get_repair(sk, optval, optlen);
case TCP_AO_GET_KEYS:
case TCP_AO_INFO: {
diff --git a/net/ipv4/tcp_ao.c b/net/ipv4/tcp_ao.c
index 7696417d0640..f8308d3f565e 100644
--- a/net/ipv4/tcp_ao.c
+++ b/net/ipv4/tcp_ao.c
@@ -851,7 +851,7 @@ void tcp_ao_syncookie(struct sock *sk, const struct sk_buff *skb,
const struct tcp_ao_hdr *aoh;
struct tcp_ao_key *key;
- treq->maclen = 0;
+ treq->used_tcp_ao = false;
if (tcp_parse_auth_options(th, NULL, &aoh) || !aoh)
return;
@@ -863,7 +863,7 @@ void tcp_ao_syncookie(struct sock *sk, const struct sk_buff *skb,
treq->ao_rcv_next = aoh->keyid;
treq->ao_keyid = aoh->rnext_keyid;
- treq->maclen = tcp_ao_maclen(key);
+ treq->used_tcp_ao = true;
}
static enum skb_drop_reason
@@ -1100,7 +1100,7 @@ void tcp_ao_connect_init(struct sock *sk)
ao_info->current_key = key;
if (!ao_info->rnext_key)
ao_info->rnext_key = key;
- tp->tcp_header_len += tcp_ao_len(key);
+ tp->tcp_header_len += tcp_ao_len_aligned(key);
ao_info->lisn = htonl(tp->write_seq);
ao_info->snd_sne = 0;
@@ -1346,7 +1346,7 @@ static int tcp_ao_parse_crypto(struct tcp_ao_add *cmd, struct tcp_ao_key *key)
syn_tcp_option_space -= TCPOLEN_MSS_ALIGNED;
syn_tcp_option_space -= TCPOLEN_TSTAMP_ALIGNED;
syn_tcp_option_space -= TCPOLEN_WSCALE_ALIGNED;
- if (tcp_ao_len(key) > syn_tcp_option_space) {
+ if (tcp_ao_len_aligned(key) > syn_tcp_option_space) {
err = -EMSGSIZE;
goto err_kfree;
}
@@ -1608,6 +1608,15 @@ static int tcp_ao_add_cmd(struct sock *sk, unsigned short int family,
if (!dev || !l3index)
return -EINVAL;
+ if (!bound_dev_if || bound_dev_if != cmd.ifindex) {
+ /* tcp_ao_established_key() doesn't expect a
+ * non-peer-matching key on an established TCP-AO
+ * connection.
+ */
+ if (!((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)))
+ return -EINVAL;
+ }
+
/* It's still possible to bind after adding keys or even
* re-bind to a different dev (with CAP_NET_RAW).
* So, no reason to return error here, rather try to be
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index bcb55d98004c..90de838a2745 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3871,8 +3871,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
* then we can probably ignore it.
*/
if (before(ack, prior_snd_una)) {
+ u32 max_window;
+
+ /* do not accept ACK for bytes we never sent. */
+ max_window = min_t(u64, tp->max_window, tp->bytes_acked);
/* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
- if (before(ack, prior_snd_una - tp->max_window)) {
+ if (before(ack, prior_snd_una - max_window)) {
if (!(flag & FLAG_NO_CHALLENGE_ACK))
tcp_send_challenge_ack(sk);
return -SKB_DROP_REASON_TCP_TOO_OLD_ACK;
@@ -7182,11 +7186,12 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
goto drop_and_release; /* Invalid TCP options */
if (aoh) {
- tcp_rsk(req)->maclen = aoh->length - sizeof(struct tcp_ao_hdr);
+ tcp_rsk(req)->used_tcp_ao = true;
tcp_rsk(req)->ao_rcv_next = aoh->keyid;
tcp_rsk(req)->ao_keyid = aoh->rnext_keyid;
+
} else {
- tcp_rsk(req)->maclen = 0;
+ tcp_rsk(req)->used_tcp_ao = false;
}
#endif
tcp_rsk(req)->snt_isn = isn;
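The tcp_ack() hunk tightens the RFC 5961 old-ACK handling: an ACK is tolerated only within min(max_window, bytes_acked) behind snd_una, so an ACK for bytes that were never sent now draws a challenge ACK. A sketch using the usual 32-bit sequence-space before() comparison; classify_old_ack() is an illustrative name:

#include <stdint.h>
#include <stdio.h>

static int before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

static const char *classify_old_ack(uint32_t ack, uint32_t snd_una,
				    uint64_t max_window, uint64_t bytes_acked)
{
	uint32_t win = max_window < bytes_acked ? max_window : bytes_acked;

	if (before(ack, snd_una - win))
		return "challenge-ack";   /* too old, or never sent */
	return "ignore";                  /* plain old duplicate */
}

int main(void)
{
	/* early in a connection bytes_acked is small, so a far-behind ACK
	 * is rejected even though max_window alone would have allowed it
	 */
	printf("%s\n", classify_old_ack(1000, 2000, 65535, 500));
	printf("%s\n", classify_old_ack(1600, 2000, 65535, 500));
	return 0;
}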
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5f693bbd578d..0c50c5a32b84 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -690,7 +690,7 @@ static bool tcp_v4_ao_sign_reset(const struct sock *sk, struct sk_buff *skb,
reply_options[0] = htonl((TCPOPT_AO << 24) | (tcp_ao_len(key) << 16) |
(aoh->rnext_keyid << 8) | keyid);
- arg->iov[0].iov_len += round_up(tcp_ao_len(key), 4);
+ arg->iov[0].iov_len += tcp_ao_len_aligned(key);
reply->doff = arg->iov[0].iov_len / 4;
if (tcp_ao_hash_hdr(AF_INET, (char *)&reply_options[1],
@@ -978,7 +978,7 @@ static void tcp_v4_send_ack(const struct sock *sk,
(tcp_ao_len(key->ao_key) << 16) |
(key->ao_key->sndid << 8) |
key->rcv_next);
- arg.iov[0].iov_len += round_up(tcp_ao_len(key->ao_key), 4);
+ arg.iov[0].iov_len += tcp_ao_len_aligned(key->ao_key);
rep.th.doff = arg.iov[0].iov_len / 4;
tcp_ao_hash_hdr(AF_INET, (char *)&rep.opt[offset],
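The recurring substitution in the TCP-AO patches above replaces tcp_ao_len() with tcp_ao_len_aligned() wherever header or option space is budgeted, since TCP options occupy 32-bit words. A plausible shape for such a helper, offered as an assumption rather than the kernel's exact definition:

#include <stdio.h>

/* round an option length up to the next multiple of 4 bytes */
static unsigned int ao_len_aligned(unsigned int ao_len)
{
	return (ao_len + 3) & ~3u;
}

int main(void)
{
	/* e.g. a 14-byte AO option consumes 16 bytes of header space */
	printf("%u\n", ao_len_aligned(14));
	return 0;
}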
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index a9807eeb311c..9e85f2a0bddd 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -615,7 +615,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
ao_key = treq->af_specific->ao_lookup(sk, req,
tcp_rsk(req)->ao_keyid, -1);
if (ao_key)
- newtp->tcp_header_len += tcp_ao_len(ao_key);
+ newtp->tcp_header_len += tcp_ao_len_aligned(ao_key);
#endif
if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index eb13a55d660c..f5ef15e1d9ac 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -825,7 +825,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
timestamps = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps);
if (tcp_key_is_ao(key)) {
opts->options |= OPTION_AO;
- remaining -= tcp_ao_len(key->ao_key);
+ remaining -= tcp_ao_len_aligned(key->ao_key);
}
}
@@ -915,7 +915,7 @@ static unsigned int tcp_synack_options(const struct sock *sk,
ireq->tstamp_ok &= !ireq->sack_ok;
} else if (tcp_key_is_ao(key)) {
opts->options |= OPTION_AO;
- remaining -= tcp_ao_len(key->ao_key);
+ remaining -= tcp_ao_len_aligned(key->ao_key);
ireq->tstamp_ok &= !ireq->sack_ok;
}
@@ -982,7 +982,7 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
size += TCPOLEN_MD5SIG_ALIGNED;
} else if (tcp_key_is_ao(key)) {
opts->options |= OPTION_AO;
- size += tcp_ao_len(key->ao_key);
+ size += tcp_ao_len_aligned(key->ao_key);
}
if (likely(tp->rx_opt.tstamp_ok)) {
@@ -3720,7 +3720,6 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
if (tcp_rsk_used_ao(req)) {
#ifdef CONFIG_TCP_AO
struct tcp_ao_key *ao_key = NULL;
- u8 maclen = tcp_rsk(req)->maclen;
u8 keyid = tcp_rsk(req)->ao_keyid;
ao_key = tcp_sk(sk)->af_specific->ao_lookup(sk, req_to_sk(req),
@@ -3730,13 +3729,11 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
* for another peer-matching key, but the peer has requested
* ao_keyid (RFC5925 RNextKeyID), so let's keep it simple here.
*/
- if (unlikely(!ao_key || tcp_ao_maclen(ao_key) != maclen)) {
- u8 key_maclen = ao_key ? tcp_ao_maclen(ao_key) : 0;
-
+ if (unlikely(!ao_key)) {
rcu_read_unlock();
kfree_skb(skb);
- net_warn_ratelimited("TCP-AO: the keyid %u with maclen %u|%u from SYN packet is not present - not sending SYNACK\n",
- keyid, maclen, key_maclen);
+ net_warn_ratelimited("TCP-AO: the keyid %u from SYN packet is not present - not sending SYNACK\n",
+ keyid);
return NULL;
}
key.ao_key = ao_key;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 28b01a068412..7772f42ff2b9 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1511,13 +1511,9 @@ out:
if (!pn_leaf && !(pn->fn_flags & RTN_RTINFO)) {
pn_leaf = fib6_find_prefix(info->nl_net, table,
pn);
-#if RT6_DEBUG >= 2
- if (!pn_leaf) {
- WARN_ON(!pn_leaf);
+ if (!pn_leaf)
pn_leaf =
info->nl_net->ipv6.fib6_null_entry;
- }
-#endif
fib6_info_hold(pn_leaf);
rcu_assign_pointer(pn->leaf, pn_leaf);
}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 937a02c2e534..8c6623496dd7 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -881,7 +881,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
if (tcp_key_is_md5(key))
tot_len += TCPOLEN_MD5SIG_ALIGNED;
if (tcp_key_is_ao(key))
- tot_len += tcp_ao_len(key->ao_key);
+ tot_len += tcp_ao_len_aligned(key->ao_key);
#ifdef CONFIG_MPTCP
if (rst && !tcp_key_is_md5(key)) {
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 037ab74f5ade..cb0291decf2e 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -88,7 +88,7 @@ config MAC80211_LEDS
config MAC80211_DEBUGFS
bool "Export mac80211 internals in DebugFS"
- depends on MAC80211 && DEBUG_FS
+ depends on MAC80211 && CFG80211_DEBUGFS
help
Select this to see extensive information about
the internal state of mac80211 in debugfs.
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index ec91e131b29e..80aeb25f1b68 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -22,88 +22,148 @@
#include "debugfs_netdev.h"
#include "driver-ops.h"
+struct ieee80211_if_read_sdata_data {
+ ssize_t (*format)(const struct ieee80211_sub_if_data *, char *, int);
+ struct ieee80211_sub_if_data *sdata;
+};
+
+static ssize_t ieee80211_if_read_sdata_handler(struct wiphy *wiphy,
+ struct file *file,
+ char *buf,
+ size_t bufsize,
+ void *data)
+{
+ struct ieee80211_if_read_sdata_data *d = data;
+
+ return d->format(d->sdata, buf, bufsize);
+}
+
static ssize_t ieee80211_if_read_sdata(
- struct ieee80211_sub_if_data *sdata,
+ struct file *file,
char __user *userbuf,
size_t count, loff_t *ppos,
ssize_t (*format)(const struct ieee80211_sub_if_data *sdata, char *, int))
{
+ struct ieee80211_sub_if_data *sdata = file->private_data;
+ struct ieee80211_if_read_sdata_data data = {
+ .format = format,
+ .sdata = sdata,
+ };
char buf[200];
- ssize_t ret = -EINVAL;
- wiphy_lock(sdata->local->hw.wiphy);
- ret = (*format)(sdata, buf, sizeof(buf));
- wiphy_unlock(sdata->local->hw.wiphy);
+ return wiphy_locked_debugfs_read(sdata->local->hw.wiphy,
+ file, buf, sizeof(buf),
+ userbuf, count, ppos,
+ ieee80211_if_read_sdata_handler,
+ &data);
+}
- if (ret >= 0)
- ret = simple_read_from_buffer(userbuf, count, ppos, buf, ret);
+struct ieee80211_if_write_sdata_data {
+ ssize_t (*write)(struct ieee80211_sub_if_data *, const char *, int);
+ struct ieee80211_sub_if_data *sdata;
+};
+
+static ssize_t ieee80211_if_write_sdata_handler(struct wiphy *wiphy,
+ struct file *file,
+ char *buf,
+ size_t count,
+ void *data)
+{
+ struct ieee80211_if_write_sdata_data *d = data;
- return ret;
+ return d->write(d->sdata, buf, count);
}
static ssize_t ieee80211_if_write_sdata(
- struct ieee80211_sub_if_data *sdata,
+ struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos,
ssize_t (*write)(struct ieee80211_sub_if_data *sdata, const char *, int))
{
+ struct ieee80211_sub_if_data *sdata = file->private_data;
+ struct ieee80211_if_write_sdata_data data = {
+ .write = write,
+ .sdata = sdata,
+ };
char buf[64];
- ssize_t ret;
- if (count >= sizeof(buf))
- return -E2BIG;
+ return wiphy_locked_debugfs_write(sdata->local->hw.wiphy,
+ file, buf, sizeof(buf),
+ userbuf, count,
+ ieee80211_if_write_sdata_handler,
+ &data);
+}
- if (copy_from_user(buf, userbuf, count))
- return -EFAULT;
- buf[count] = '\0';
+struct ieee80211_if_read_link_data {
+ ssize_t (*format)(const struct ieee80211_link_data *, char *, int);
+ struct ieee80211_link_data *link;
+};
- wiphy_lock(sdata->local->hw.wiphy);
- ret = (*write)(sdata, buf, count);
- wiphy_unlock(sdata->local->hw.wiphy);
+static ssize_t ieee80211_if_read_link_handler(struct wiphy *wiphy,
+ struct file *file,
+ char *buf,
+ size_t bufsize,
+ void *data)
+{
+ struct ieee80211_if_read_link_data *d = data;
- return ret;
+ return d->format(d->link, buf, bufsize);
}
static ssize_t ieee80211_if_read_link(
- struct ieee80211_link_data *link,
+ struct file *file,
char __user *userbuf,
size_t count, loff_t *ppos,
ssize_t (*format)(const struct ieee80211_link_data *link, char *, int))
{
+ struct ieee80211_link_data *link = file->private_data;
+ struct ieee80211_if_read_link_data data = {
+ .format = format,
+ .link = link,
+ };
char buf[200];
- ssize_t ret = -EINVAL;
- wiphy_lock(link->sdata->local->hw.wiphy);
- ret = (*format)(link, buf, sizeof(buf));
- wiphy_unlock(link->sdata->local->hw.wiphy);
+ return wiphy_locked_debugfs_read(link->sdata->local->hw.wiphy,
+ file, buf, sizeof(buf),
+ userbuf, count, ppos,
+ ieee80211_if_read_link_handler,
+ &data);
+}
+
+struct ieee80211_if_write_link_data {
+ ssize_t (*write)(struct ieee80211_link_data *, const char *, int);
+ struct ieee80211_link_data *link;
+};
- if (ret >= 0)
- ret = simple_read_from_buffer(userbuf, count, ppos, buf, ret);
+static ssize_t ieee80211_if_write_link_handler(struct wiphy *wiphy,
+ struct file *file,
+ char *buf,
+ size_t count,
+ void *data)
+{
+ struct ieee80211_if_write_link_data *d = data;
- return ret;
+ return d->write(d->link, buf, count);
}
static ssize_t ieee80211_if_write_link(
- struct ieee80211_link_data *link,
+ struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos,
ssize_t (*write)(struct ieee80211_link_data *link, const char *, int))
{
+ struct ieee80211_link_data *link = file->private_data;
+ struct ieee80211_if_write_link_data data = {
+ .write = write,
+ .link = link,
+ };
char buf[64];
- ssize_t ret;
-
- if (count >= sizeof(buf))
- return -E2BIG;
-
- if (copy_from_user(buf, userbuf, count))
- return -EFAULT;
- buf[count] = '\0';
-
- wiphy_lock(link->sdata->local->hw.wiphy);
- ret = (*write)(link, buf, count);
- wiphy_unlock(link->sdata->local->hw.wiphy);
- return ret;
+ return wiphy_locked_debugfs_write(link->sdata->local->hw.wiphy,
+ file, buf, sizeof(buf),
+ userbuf, count,
+ ieee80211_if_write_link_handler,
+ &data);
}
#define IEEE80211_IF_FMT(name, type, field, format_string) \
@@ -173,7 +233,7 @@ static ssize_t ieee80211_if_read_##name(struct file *file, \
char __user *userbuf, \
size_t count, loff_t *ppos) \
{ \
- return ieee80211_if_read_sdata(file->private_data, \
+ return ieee80211_if_read_sdata(file, \
userbuf, count, ppos, \
ieee80211_if_fmt_##name); \
}
@@ -183,7 +243,7 @@ static ssize_t ieee80211_if_write_##name(struct file *file, \
const char __user *userbuf, \
size_t count, loff_t *ppos) \
{ \
- return ieee80211_if_write_sdata(file->private_data, userbuf, \
+ return ieee80211_if_write_sdata(file, userbuf, \
count, ppos, \
ieee80211_if_parse_##name); \
}
@@ -211,7 +271,7 @@ static ssize_t ieee80211_if_read_##name(struct file *file, \
char __user *userbuf, \
size_t count, loff_t *ppos) \
{ \
- return ieee80211_if_read_link(file->private_data, \
+ return ieee80211_if_read_link(file, \
userbuf, count, ppos, \
ieee80211_if_fmt_##name); \
}
@@ -221,7 +281,7 @@ static ssize_t ieee80211_if_write_##name(struct file *file, \
const char __user *userbuf, \
size_t count, loff_t *ppos) \
{ \
- return ieee80211_if_write_link(file->private_data, userbuf, \
+ return ieee80211_if_write_link(file, userbuf, \
count, ppos, \
ieee80211_if_parse_##name); \
}
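The debugfs_netdev.c rework funnels every typed read/write callback through a generic handler by bundling the function pointer with its object in a small on-stack struct. The trampoline pattern condensed to a few lines; struct sdata, read_trampoline and fmt_id are illustrative:

#include <stdio.h>
#include <sys/types.h>

struct sdata { int id; };

struct read_trampoline {
	ssize_t (*format)(const struct sdata *, char *, int);
	struct sdata *sdata;
};

/* the generic signature a locked-debugfs helper would expect */
static ssize_t read_handler(char *buf, size_t bufsize, void *data)
{
	struct read_trampoline *d = data;

	return d->format(d->sdata, buf, (int)bufsize);
}

static ssize_t fmt_id(const struct sdata *s, char *buf, int len)
{
	return snprintf(buf, (size_t)len, "%d\n", s->id);
}

int main(void)
{
	struct sdata s = { .id = 7 };
	struct read_trampoline d = { .format = fmt_id, .sdata = &s };
	char buf[16];
	ssize_t n = read_handler(buf, sizeof(buf), &d);

	fwrite(buf, 1, (size_t)n, stdout);
	return 0;
}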
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 06e3613bf46b..5bf507ebb096 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -312,23 +312,14 @@ static ssize_t sta_aql_write(struct file *file, const char __user *userbuf,
STA_OPS_RW(aql);
-static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
+static ssize_t sta_agg_status_do_read(struct wiphy *wiphy, struct file *file,
+ char *buf, size_t bufsz, void *data)
{
- char *buf, *p;
- ssize_t bufsz = 71 + IEEE80211_NUM_TIDS * 40;
+ struct sta_info *sta = data;
+ char *p = buf;
int i;
- struct sta_info *sta = file->private_data;
struct tid_ampdu_rx *tid_rx;
struct tid_ampdu_tx *tid_tx;
- ssize_t ret;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- p = buf;
-
- rcu_read_lock();
p += scnprintf(p, bufsz + buf - p, "next dialog_token: %#02x\n",
sta->ampdu_mlme.dialog_token_allocator + 1);
@@ -338,8 +329,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
bool tid_rx_valid;
- tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[i]);
- tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[i]);
+ tid_rx = wiphy_dereference(wiphy, sta->ampdu_mlme.tid_rx[i]);
+ tid_tx = wiphy_dereference(wiphy, sta->ampdu_mlme.tid_tx[i]);
tid_rx_valid = test_bit(i, sta->ampdu_mlme.agg_session_valid);
p += scnprintf(p, bufsz + buf - p, "%02d", i);
@@ -358,31 +349,39 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
tid_tx ? skb_queue_len(&tid_tx->pending) : 0);
p += scnprintf(p, bufsz + buf - p, "\n");
}
- rcu_read_unlock();
- ret = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+ return p - buf;
+}
+
+static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct sta_info *sta = file->private_data;
+ struct wiphy *wiphy = sta->local->hw.wiphy;
+ size_t bufsz = 71 + IEEE80211_NUM_TIDS * 40;
+ char *buf = kmalloc(bufsz, GFP_KERNEL);
+ ssize_t ret;
+
+ if (!buf)
+ return -ENOMEM;
+
+ ret = wiphy_locked_debugfs_read(wiphy, file, buf, bufsz,
+ userbuf, count, ppos,
+ sta_agg_status_do_read, sta);
kfree(buf);
+
return ret;
}
-static ssize_t sta_agg_status_write(struct file *file, const char __user *userbuf,
- size_t count, loff_t *ppos)
+static ssize_t sta_agg_status_do_write(struct wiphy *wiphy, struct file *file,
+ char *buf, size_t count, void *data)
{
- char _buf[25] = {}, *buf = _buf;
- struct sta_info *sta = file->private_data;
+ struct sta_info *sta = data;
bool start, tx;
unsigned long tid;
- char *pos;
+ char *pos = buf;
int ret, timeout = 5000;
- if (count > sizeof(_buf))
- return -EINVAL;
-
- if (copy_from_user(buf, userbuf, count))
- return -EFAULT;
-
- buf[sizeof(_buf) - 1] = '\0';
- pos = buf;
buf = strsep(&pos, " ");
if (!buf)
return -EINVAL;
@@ -420,7 +419,6 @@ static ssize_t sta_agg_status_write(struct file *file, const char __user *userbu
if (ret || tid >= IEEE80211_NUM_TIDS)
return -EINVAL;
- wiphy_lock(sta->local->hw.wiphy);
if (tx) {
if (start)
ret = ieee80211_start_tx_ba_session(&sta->sta, tid,
@@ -432,10 +430,22 @@ static ssize_t sta_agg_status_write(struct file *file, const char __user *userbu
3, true);
ret = 0;
}
- wiphy_unlock(sta->local->hw.wiphy);
return ret ?: count;
}
+
+static ssize_t sta_agg_status_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct sta_info *sta = file->private_data;
+ struct wiphy *wiphy = sta->local->hw.wiphy;
+ char _buf[26];
+
+ return wiphy_locked_debugfs_write(wiphy, file, _buf, sizeof(_buf),
+ userbuf, count,
+ sta_agg_status_do_write, sta);
+}
STA_OPS_RW(agg_status);
/* link sta attributes */
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 568633b38c47..f690c385a345 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -23,7 +23,7 @@
static inline struct ieee80211_sub_if_data *
get_bss_sdata(struct ieee80211_sub_if_data *sdata)
{
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ if (sdata && sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
u.ap);
@@ -695,11 +695,14 @@ static inline void drv_flush(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
u32 queues, bool drop)
{
- struct ieee80211_vif *vif = sdata ? &sdata->vif : NULL;
+ struct ieee80211_vif *vif;
might_sleep();
lockdep_assert_wiphy(local->hw.wiphy);
+ sdata = get_bss_sdata(sdata);
+ vif = sdata ? &sdata->vif : NULL;
+
if (sdata && !check_sdata_in_driver(sdata))
return;
@@ -716,6 +719,8 @@ static inline void drv_flush_sta(struct ieee80211_local *local,
might_sleep();
lockdep_assert_wiphy(local->hw.wiphy);
+ sdata = get_bss_sdata(sdata);
+
if (sdata && !check_sdata_in_driver(sdata))
return;
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 68cea2685224..749f4ecab990 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -271,6 +271,7 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
case NL80211_CHAN_WIDTH_80:
case NL80211_CHAN_WIDTH_80P80:
case NL80211_CHAN_WIDTH_160:
+ case NL80211_CHAN_WIDTH_320:
bw = ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
break;
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index cd15ec73073e..c53914012d01 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -108,6 +108,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->suboptions |= OPTION_MPTCP_DSS;
mp_opt->use_map = 1;
mp_opt->mpc_map = 1;
+ mp_opt->use_ack = 0;
mp_opt->data_len = get_unaligned_be16(ptr);
ptr += 2;
}
diff --git a/net/netfilter/nf_bpf_link.c b/net/netfilter/nf_bpf_link.c
index e502ec00b2fe..0e4beae421f8 100644
--- a/net/netfilter/nf_bpf_link.c
+++ b/net/netfilter/nf_bpf_link.c
@@ -31,7 +31,7 @@ struct bpf_nf_link {
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) || IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
static const struct nf_defrag_hook *
get_proto_defrag_hook(struct bpf_nf_link *link,
- const struct nf_defrag_hook __rcu *global_hook,
+ const struct nf_defrag_hook __rcu **ptr_global_hook,
const char *mod)
{
const struct nf_defrag_hook *hook;
@@ -39,7 +39,7 @@ get_proto_defrag_hook(struct bpf_nf_link *link,
/* RCU protects us from races against module unloading */
rcu_read_lock();
- hook = rcu_dereference(global_hook);
+ hook = rcu_dereference(*ptr_global_hook);
if (!hook) {
rcu_read_unlock();
err = request_module(mod);
@@ -47,7 +47,7 @@ get_proto_defrag_hook(struct bpf_nf_link *link,
return ERR_PTR(err < 0 ? err : -EINVAL);
rcu_read_lock();
- hook = rcu_dereference(global_hook);
+ hook = rcu_dereference(*ptr_global_hook);
}
if (hook && try_module_get(hook->owner)) {
@@ -78,7 +78,7 @@ static int bpf_nf_enable_defrag(struct bpf_nf_link *link)
switch (link->hook_ops.pf) {
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
case NFPROTO_IPV4:
- hook = get_proto_defrag_hook(link, nf_defrag_v4_hook, "nf_defrag_ipv4");
+ hook = get_proto_defrag_hook(link, &nf_defrag_v4_hook, "nf_defrag_ipv4");
if (IS_ERR(hook))
return PTR_ERR(hook);
@@ -87,7 +87,7 @@ static int bpf_nf_enable_defrag(struct bpf_nf_link *link)
#endif
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
case NFPROTO_IPV6:
- hook = get_proto_defrag_hook(link, nf_defrag_v6_hook, "nf_defrag_ipv6");
+ hook = get_proto_defrag_hook(link, &nf_defrag_v6_hook, "nf_defrag_ipv6");
if (IS_ERR(hook))
return PTR_ERR(hook);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index c0a42989b982..c5c17c6e80ed 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -803,7 +803,7 @@ static struct nft_table *nft_table_lookup(const struct net *net,
static struct nft_table *nft_table_lookup_byhandle(const struct net *net,
const struct nlattr *nla,
- u8 genmask, u32 nlpid)
+ int family, u8 genmask, u32 nlpid)
{
struct nftables_pernet *nft_net;
struct nft_table *table;
@@ -811,6 +811,7 @@ static struct nft_table *nft_table_lookup_byhandle(const struct net *net,
nft_net = nft_pernet(net);
list_for_each_entry(table, &nft_net->tables, list) {
if (be64_to_cpu(nla_get_be64(nla)) == table->handle &&
+ table->family == family &&
nft_active_genmask(table, genmask)) {
if (nft_table_has_owner(table) &&
nlpid && table->nlpid != nlpid)
@@ -1544,7 +1545,7 @@ static int nf_tables_deltable(struct sk_buff *skb, const struct nfnl_info *info,
if (nla[NFTA_TABLE_HANDLE]) {
attr = nla[NFTA_TABLE_HANDLE];
- table = nft_table_lookup_byhandle(net, attr, genmask,
+ table = nft_table_lookup_byhandle(net, attr, family, genmask,
NETLINK_CB(skb).portid);
} else {
attr = nla[NFTA_TABLE_NAME];
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index b18a79039125..c09dba57354c 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -280,10 +280,15 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
priv->expr_array[i] = dynset_expr;
priv->num_exprs++;
- if (set->num_exprs &&
- dynset_expr->ops != set->exprs[i]->ops) {
- err = -EOPNOTSUPP;
- goto err_expr_free;
+ if (set->num_exprs) {
+ if (i >= set->num_exprs) {
+ err = -EINVAL;
+ goto err_expr_free;
+ }
+ if (dynset_expr->ops != set->exprs[i]->ops) {
+ err = -EOPNOTSUPP;
+ goto err_expr_free;
+ }
}
i++;
}
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index 3fbaa7bf41f9..6eb571d0c3fd 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -214,7 +214,7 @@ static void nft_exthdr_tcp_eval(const struct nft_expr *expr,
offset = i + priv->offset;
if (priv->flags & NFT_EXTHDR_F_PRESENT) {
- *dest = 1;
+ nft_reg_store8(dest, 1);
} else {
if (priv->len % NFT_REG32_SIZE)
dest[priv->len / NFT_REG32_SIZE] = 0;
@@ -461,7 +461,7 @@ static void nft_exthdr_dccp_eval(const struct nft_expr *expr,
type = bufp[0];
if (type == priv->type) {
- *dest = 1;
+ nft_reg_store8(dest, 1);
return;
}
diff --git a/net/netfilter/nft_fib.c b/net/netfilter/nft_fib.c
index 1bfe258018da..37cfe6dd712d 100644
--- a/net/netfilter/nft_fib.c
+++ b/net/netfilter/nft_fib.c
@@ -145,11 +145,15 @@ void nft_fib_store_result(void *reg, const struct nft_fib *priv,
switch (priv->result) {
case NFT_FIB_RESULT_OIF:
index = dev ? dev->ifindex : 0;
- *dreg = (priv->flags & NFTA_FIB_F_PRESENT) ? !!index : index;
+ if (priv->flags & NFTA_FIB_F_PRESENT)
+ nft_reg_store8(dreg, !!index);
+ else
+ *dreg = index;
+
break;
case NFT_FIB_RESULT_OIFNAME:
if (priv->flags & NFTA_FIB_F_PRESENT)
- *dreg = !!dev;
+ nft_reg_store8(dreg, !!dev);
else
strscpy_pad(reg, dev ? dev->name : "", IFNAMSIZ);
break;
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
index 701977af3ee8..7252fcdae349 100644
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -2043,6 +2043,9 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
e = f->mt[r].e;
+ if (!nft_set_elem_active(&e->ext, iter->genmask))
+ goto cont;
+
iter->err = iter->fn(ctx, set, iter, &e->priv);
if (iter->err < 0)
goto out;
diff --git a/net/netfilter/xt_owner.c b/net/netfilter/xt_owner.c
index e85ce69924ae..50332888c8d2 100644
--- a/net/netfilter/xt_owner.c
+++ b/net/netfilter/xt_owner.c
@@ -76,18 +76,23 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
*/
return false;
- filp = sk->sk_socket->file;
- if (filp == NULL)
+ read_lock_bh(&sk->sk_callback_lock);
+ filp = sk->sk_socket ? sk->sk_socket->file : NULL;
+ if (filp == NULL) {
+ read_unlock_bh(&sk->sk_callback_lock);
return ((info->match ^ info->invert) &
(XT_OWNER_UID | XT_OWNER_GID)) == 0;
+ }
if (info->match & XT_OWNER_UID) {
kuid_t uid_min = make_kuid(net->user_ns, info->uid_min);
kuid_t uid_max = make_kuid(net->user_ns, info->uid_max);
if ((uid_gte(filp->f_cred->fsuid, uid_min) &&
uid_lte(filp->f_cred->fsuid, uid_max)) ^
- !(info->invert & XT_OWNER_UID))
+ !(info->invert & XT_OWNER_UID)) {
+ read_unlock_bh(&sk->sk_callback_lock);
return false;
+ }
}
if (info->match & XT_OWNER_GID) {
@@ -112,10 +117,13 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
}
}
- if (match ^ !(info->invert & XT_OWNER_GID))
+ if (match ^ !(info->invert & XT_OWNER_GID)) {
+ read_unlock_bh(&sk->sk_callback_lock);
return false;
+ }
}
+ read_unlock_bh(&sk->sk_callback_lock);
return true;
}
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 92ef5ed2e7b0..9c7ffd10df2a 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -1691,6 +1691,9 @@ static int genl_bind(struct net *net, int group)
if ((grp->flags & GENL_UNS_ADMIN_PERM) &&
!ns_capable(net->user_ns, CAP_NET_ADMIN))
ret = -EPERM;
+ if (grp->cap_sys_admin &&
+ !ns_capable(net->user_ns, CAP_SYS_ADMIN))
+ ret = -EPERM;
break;
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index a84e00b5904b..7adf48549a3b 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -4300,7 +4300,7 @@ static void packet_mm_open(struct vm_area_struct *vma)
struct sock *sk = sock->sk;
if (sk)
- atomic_inc(&pkt_sk(sk)->mapped);
+ atomic_long_inc(&pkt_sk(sk)->mapped);
}
static void packet_mm_close(struct vm_area_struct *vma)
@@ -4310,7 +4310,7 @@ static void packet_mm_close(struct vm_area_struct *vma)
struct sock *sk = sock->sk;
if (sk)
- atomic_dec(&pkt_sk(sk)->mapped);
+ atomic_long_dec(&pkt_sk(sk)->mapped);
}
static const struct vm_operations_struct packet_mmap_ops = {
@@ -4405,7 +4405,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
err = -EBUSY;
if (!closing) {
- if (atomic_read(&po->mapped))
+ if (atomic_long_read(&po->mapped))
goto out;
if (packet_read_pending(rb))
goto out;
@@ -4508,7 +4508,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
err = -EBUSY;
mutex_lock(&po->pg_vec_lock);
- if (closing || atomic_read(&po->mapped) == 0) {
+ if (closing || atomic_long_read(&po->mapped) == 0) {
err = 0;
spin_lock_bh(&rb_queue->lock);
swap(rb->pg_vec, pg_vec);
@@ -4526,9 +4526,9 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
po->prot_hook.func = (po->rx_ring.pg_vec) ?
tpacket_rcv : packet_rcv;
skb_queue_purge(rb_queue);
- if (atomic_read(&po->mapped))
- pr_err("packet_mmap: vma is busy: %d\n",
- atomic_read(&po->mapped));
+ if (atomic_long_read(&po->mapped))
+ pr_err("packet_mmap: vma is busy: %ld\n",
+ atomic_long_read(&po->mapped));
}
mutex_unlock(&po->pg_vec_lock);
@@ -4606,7 +4606,7 @@ static int packet_mmap(struct file *file, struct socket *sock,
}
}
- atomic_inc(&po->mapped);
+ atomic_long_inc(&po->mapped);
vma->vm_ops = &packet_mmap_ops;
err = 0;
diff --git a/net/packet/internal.h b/net/packet/internal.h
index d29c94c45159..d5d70712007a 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -122,7 +122,7 @@ struct packet_sock {
__be16 num;
struct packet_rollover *rollover;
struct packet_mclist *mclist;
- atomic_t mapped;
+ atomic_long_t mapped;
enum tpacket_versions tp_version;
unsigned int tp_hdrlen;
unsigned int tp_reserve;
diff --git a/net/psample/psample.c b/net/psample/psample.c
index 81a794e36f53..c34e902855db 100644
--- a/net/psample/psample.c
+++ b/net/psample/psample.c
@@ -31,7 +31,8 @@ enum psample_nl_multicast_groups {
static const struct genl_multicast_group psample_nl_mcgrps[] = {
[PSAMPLE_NL_MCGRP_CONFIG] = { .name = PSAMPLE_NL_MCGRP_CONFIG_NAME },
- [PSAMPLE_NL_MCGRP_SAMPLE] = { .name = PSAMPLE_NL_MCGRP_SAMPLE_NAME },
+ [PSAMPLE_NL_MCGRP_SAMPLE] = { .name = PSAMPLE_NL_MCGRP_SAMPLE_NAME,
+ .flags = GENL_UNS_ADMIN_PERM },
};
static struct genl_family psample_nl_family __ro_after_init;
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 2a1388841951..73eebddbbf41 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -723,7 +723,7 @@ static void smcd_conn_save_peer_info(struct smc_sock *smc,
int bufsize = smc_uncompress_bufsize(clc->d0.dmbe_size);
smc->conn.peer_rmbe_idx = clc->d0.dmbe_idx;
- smc->conn.peer_token = clc->d0.token;
+ smc->conn.peer_token = ntohll(clc->d0.token);
/* msg header takes up space in the buffer */
smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
@@ -1415,7 +1415,7 @@ static int smc_connect_ism(struct smc_sock *smc,
if (rc)
return rc;
}
- ini->ism_peer_gid[ini->ism_selected] = aclc->d0.gid;
+ ini->ism_peer_gid[ini->ism_selected] = ntohll(aclc->d0.gid);
/* there is only one lgr role for SMC-D; use server lock */
mutex_lock(&smc_server_lgr_pending);
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 8deb46c28f1d..72f4d81a3f41 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -1004,6 +1004,7 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc,
{
struct smc_connection *conn = &smc->conn;
struct smc_clc_first_contact_ext_v2x fce;
+ struct smcd_dev *smcd = conn->lgr->smcd;
struct smc_clc_msg_accept_confirm *clc;
struct smc_clc_fce_gid_ext gle;
struct smc_clc_msg_trail trl;
@@ -1021,17 +1022,15 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc,
memcpy(clc->hdr.eyecatcher, SMCD_EYECATCHER,
sizeof(SMCD_EYECATCHER));
clc->hdr.typev1 = SMC_TYPE_D;
- clc->d0.gid =
- conn->lgr->smcd->ops->get_local_gid(conn->lgr->smcd);
- clc->d0.token = conn->rmb_desc->token;
+ clc->d0.gid = htonll(smcd->ops->get_local_gid(smcd));
+ clc->d0.token = htonll(conn->rmb_desc->token);
clc->d0.dmbe_size = conn->rmbe_size_comp;
clc->d0.dmbe_idx = 0;
memcpy(&clc->d0.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);
if (version == SMC_V1) {
clc->hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN);
} else {
- clc_v2->d1.chid =
- htons(smc_ism_get_chid(conn->lgr->smcd));
+ clc_v2->d1.chid = htons(smc_ism_get_chid(smcd));
if (eid && eid[0])
memcpy(clc_v2->d1.eid, eid, SMC_MAX_EID_LEN);
len = SMCD_CLC_ACCEPT_CONFIRM_LEN_V2;
diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
index c5c8e7db775a..08155a96a02a 100644
--- a/net/smc/smc_clc.h
+++ b/net/smc/smc_clc.h
@@ -204,8 +204,8 @@ struct smcr_clc_msg_accept_confirm { /* SMCR accept/confirm */
} __packed;
struct smcd_clc_msg_accept_confirm_common { /* SMCD accept/confirm */
- u64 gid; /* Sender GID */
- u64 token; /* DMB token */
+ __be64 gid; /* Sender GID */
+ __be64 token; /* DMB token */
u8 dmbe_idx; /* DMBE index */
#if defined(__BIG_ENDIAN_BITFIELD)
u8 dmbe_size : 4, /* buf size (compressed) */
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 316f76187962..e37b4d2e2acd 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -952,6 +952,8 @@ static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg,
}
sk_msg_page_add(msg_pl, page, part, off);
+ msg_pl->sg.copybreak = 0;
+ msg_pl->sg.curr = msg_pl->sg.end;
sk_mem_charge(sk, part);
*copied += part;
try_to_copy -= part;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index a357dc5f2404..ac1f2bc18fc9 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -213,8 +213,6 @@ static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
}
#endif /* CONFIG_SECURITY_NETWORK */
-#define unix_peer(sk) (unix_sk(sk)->peer)
-
static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
return unix_peer(osk) == sk;
diff --git a/net/unix/unix_bpf.c b/net/unix/unix_bpf.c
index 2f9d8271c6ec..7ea7c3a0d0d0 100644
--- a/net/unix/unix_bpf.c
+++ b/net/unix/unix_bpf.c
@@ -159,12 +159,17 @@ int unix_dgram_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool re
int unix_stream_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
{
+ struct sock *sk_pair;
+
if (restore) {
sk->sk_write_space = psock->saved_write_space;
sock_replace_proto(sk, psock->sk_proto);
return 0;
}
+ sk_pair = unix_peer(sk);
+ sock_hold(sk_pair);
+ psock->sk_pair = sk_pair;
unix_stream_bpf_check_needs_rebuild(psock->sk_proto);
sock_replace_proto(sk, &unix_stream_bpf_prot);
return 0;
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index f6dc896bf44c..c8e162c9d1df 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -59,8 +59,7 @@ static bool virtio_transport_can_zcopy(const struct virtio_transport *t_ops,
t_ops = virtio_transport_get_ops(info->vsk);
if (t_ops->can_msgzerocopy) {
- int pages_in_iov = iov_iter_npages(iov_iter, MAX_SKB_FRAGS);
- int pages_to_send = min(pages_in_iov, MAX_SKB_FRAGS);
+ int pages_to_send = iov_iter_npages(iov_iter, MAX_SKB_FRAGS);
/* +1 is for packet header. */
return t_ops->can_msgzerocopy(pages_to_send + 1);
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 758c9a2a12c0..409d74c57ca0 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -191,13 +191,13 @@ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
return err;
}
+ wiphy_lock(&rdev->wiphy);
list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
if (!wdev->netdev)
continue;
nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE);
}
- wiphy_lock(&rdev->wiphy);
nl80211_notify_wiphy(rdev, NL80211_CMD_DEL_WIPHY);
wiphy_net_set(&rdev->wiphy, net);
@@ -206,13 +206,13 @@ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
WARN_ON(err);
nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY);
- wiphy_unlock(&rdev->wiphy);
list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
if (!wdev->netdev)
continue;
nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE);
}
+ wiphy_unlock(&rdev->wiphy);
return 0;
}
@@ -221,7 +221,9 @@ static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data)
{
struct cfg80211_registered_device *rdev = data;
+ wiphy_lock(&rdev->wiphy);
rdev_rfkill_poll(rdev);
+ wiphy_unlock(&rdev->wiphy);
}
void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 4c692c7faf30..cb61d33d4f1e 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -293,6 +293,7 @@ struct cfg80211_cqm_config {
u32 rssi_hyst;
s32 last_rssi_event_value;
enum nl80211_cqm_rssi_threshold_event last_rssi_event_type;
+ bool use_range_api;
int n_rssi_thresholds;
s32 rssi_thresholds[] __counted_by(n_rssi_thresholds);
};
diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c
index 0878b162890a..40e49074e2ee 100644
--- a/net/wireless/debugfs.c
+++ b/net/wireless/debugfs.c
@@ -4,6 +4,7 @@
*
* Copyright 2009 Luis R. Rodriguez <[email protected]>
* Copyright 2007 Johannes Berg <[email protected]>
+ * Copyright (C) 2023 Intel Corporation
*/
#include <linux/slab.h>
@@ -109,3 +110,162 @@ void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev)
DEBUGFS_ADD(long_retry_limit);
DEBUGFS_ADD(ht40allow_map);
}
+
+struct debugfs_read_work {
+ struct wiphy_work work;
+ ssize_t (*handler)(struct wiphy *wiphy,
+ struct file *file,
+ char *buf,
+ size_t count,
+ void *data);
+ struct wiphy *wiphy;
+ struct file *file;
+ char *buf;
+ size_t bufsize;
+ void *data;
+ ssize_t ret;
+ struct completion completion;
+};
+
+static void wiphy_locked_debugfs_read_work(struct wiphy *wiphy,
+ struct wiphy_work *work)
+{
+ struct debugfs_read_work *w = container_of(work, typeof(*w), work);
+
+ w->ret = w->handler(w->wiphy, w->file, w->buf, w->bufsize, w->data);
+ complete(&w->completion);
+}
+
+static void wiphy_locked_debugfs_read_cancel(struct dentry *dentry,
+ void *data)
+{
+ struct debugfs_read_work *w = data;
+
+ wiphy_work_cancel(w->wiphy, &w->work);
+ complete(&w->completion);
+}
+
+ssize_t wiphy_locked_debugfs_read(struct wiphy *wiphy, struct file *file,
+ char *buf, size_t bufsize,
+ char __user *userbuf, size_t count,
+ loff_t *ppos,
+ ssize_t (*handler)(struct wiphy *wiphy,
+ struct file *file,
+ char *buf,
+ size_t bufsize,
+ void *data),
+ void *data)
+{
+ struct debugfs_read_work work = {
+ .handler = handler,
+ .wiphy = wiphy,
+ .file = file,
+ .buf = buf,
+ .bufsize = bufsize,
+ .data = data,
+ .ret = -ENODEV,
+ .completion = COMPLETION_INITIALIZER_ONSTACK(work.completion),
+ };
+ struct debugfs_cancellation cancellation = {
+ .cancel = wiphy_locked_debugfs_read_cancel,
+ .cancel_data = &work,
+ };
+
+ /* don't leak stack data */
+ memset(buf, 0, bufsize);
+
+ wiphy_work_init(&work.work, wiphy_locked_debugfs_read_work);
+ wiphy_work_queue(wiphy, &work.work);
+
+ debugfs_enter_cancellation(file, &cancellation);
+ wait_for_completion(&work.completion);
+ debugfs_leave_cancellation(file, &cancellation);
+
+ if (work.ret < 0)
+ return work.ret;
+
+ if (WARN_ON(work.ret > bufsize))
+ return -EINVAL;
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, work.ret);
+}
+EXPORT_SYMBOL_GPL(wiphy_locked_debugfs_read);
+
+struct debugfs_write_work {
+ struct wiphy_work work;
+ ssize_t (*handler)(struct wiphy *wiphy,
+ struct file *file,
+ char *buf,
+ size_t count,
+ void *data);
+ struct wiphy *wiphy;
+ struct file *file;
+ char *buf;
+ size_t count;
+ void *data;
+ ssize_t ret;
+ struct completion completion;
+};
+
+static void wiphy_locked_debugfs_write_work(struct wiphy *wiphy,
+ struct wiphy_work *work)
+{
+ struct debugfs_write_work *w = container_of(work, typeof(*w), work);
+
+ w->ret = w->handler(w->wiphy, w->file, w->buf, w->count, w->data);
+ complete(&w->completion);
+}
+
+static void wiphy_locked_debugfs_write_cancel(struct dentry *dentry,
+ void *data)
+{
+ struct debugfs_write_work *w = data;
+
+ wiphy_work_cancel(w->wiphy, &w->work);
+ complete(&w->completion);
+}
+
+ssize_t wiphy_locked_debugfs_write(struct wiphy *wiphy,
+ struct file *file, char *buf, size_t bufsize,
+ const char __user *userbuf, size_t count,
+ ssize_t (*handler)(struct wiphy *wiphy,
+ struct file *file,
+ char *buf,
+ size_t count,
+ void *data),
+ void *data)
+{
+ struct debugfs_write_work work = {
+ .handler = handler,
+ .wiphy = wiphy,
+ .file = file,
+ .buf = buf,
+ .count = count,
+ .data = data,
+ .ret = -ENODEV,
+ .completion = COMPLETION_INITIALIZER_ONSTACK(work.completion),
+ };
+ struct debugfs_cancellation cancellation = {
+ .cancel = wiphy_locked_debugfs_write_cancel,
+ .cancel_data = &work,
+ };
+
+ /* mostly used for strings, so enforce NUL-termination for safety */
+ if (count >= bufsize)
+ return -EINVAL;
+
+ memset(buf, 0, bufsize);
+
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+
+ wiphy_work_init(&work.work, wiphy_locked_debugfs_write_work);
+ wiphy_work_queue(wiphy, &work.work);
+
+ debugfs_enter_cancellation(file, &cancellation);
+ wait_for_completion(&work.completion);
+ debugfs_leave_cancellation(file, &cancellation);
+
+ return work.ret;
+}
+EXPORT_SYMBOL_GPL(wiphy_locked_debugfs_write);
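
Both helpers follow the same pattern: the handler is queued as wiphy work, and the caller blocks on an on-stack completion, with debugfs cancellation hooked up so that file removal cannot deadlock against the pending work. A simplified, runnable userspace analogue of the queue-then-wait core, built with pthreads instead of wiphy_work and struct completion:

    /* build: cc -pthread sketch.c */
    #include <pthread.h>
    #include <stdio.h>

    struct work {
        int (*handler)(void);
        int done, ret;
        pthread_mutex_t lock;
        pthread_cond_t cond;
    };

    static void *worker(void *arg)       /* stands in for the wiphy worker */
    {
        struct work *w = arg;

        w->ret = w->handler();           /* run handler in worker context */
        pthread_mutex_lock(&w->lock);
        w->done = 1;                     /* complete(&work.completion) */
        pthread_cond_signal(&w->cond);
        pthread_mutex_unlock(&w->lock);
        return NULL;
    }

    static int handler(void) { return 42; }

    int main(void)
    {
        struct work w = { handler, 0, -1, PTHREAD_MUTEX_INITIALIZER,
                          PTHREAD_COND_INITIALIZER };
        pthread_t t;

        pthread_create(&t, NULL, worker, &w);
        pthread_mutex_lock(&w.lock);     /* wait_for_completion() */
        while (!w.done)
            pthread_cond_wait(&w.cond, &w.lock);
        pthread_mutex_unlock(&w.lock);
        pthread_join(t, NULL);
        printf("ret=%d\n", w.ret);
        return 0;
    }
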
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 569234bc2be6..1cbbb11ea503 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3822,6 +3822,8 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
struct net_device *dev = wdev->netdev;
void *hdr;
+ lockdep_assert_wiphy(&rdev->wiphy);
+
WARN_ON(cmd != NL80211_CMD_NEW_INTERFACE &&
cmd != NL80211_CMD_DEL_INTERFACE &&
cmd != NL80211_CMD_SET_INTERFACE);
@@ -3989,6 +3991,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
if_idx = 0;
+ wiphy_lock(&rdev->wiphy);
list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
if (if_idx < if_start) {
if_idx++;
@@ -3998,10 +4001,12 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
cb->nlh->nlmsg_seq, NLM_F_MULTI,
rdev, wdev,
NL80211_CMD_NEW_INTERFACE) < 0) {
+ wiphy_unlock(&rdev->wiphy);
goto out;
}
if_idx++;
}
+ wiphy_unlock(&rdev->wiphy);
wp_idx++;
}
@@ -12787,10 +12792,6 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
int i, n, low_index;
int err;
- /* RSSI reporting disabled? */
- if (!cqm_config)
- return rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0);
-
/*
* Obtain current RSSI value if possible, if not and no RSSI threshold
* event has been received yet, we should receive an event after a
@@ -12865,23 +12866,25 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
return -EOPNOTSUPP;
- if (n_thresholds <= 1 && rdev->ops->set_cqm_rssi_config) {
- if (n_thresholds == 0 || thresholds[0] == 0) /* Disabling */
- return rdev_set_cqm_rssi_config(rdev, dev, 0, 0);
-
- return rdev_set_cqm_rssi_config(rdev, dev,
- thresholds[0], hysteresis);
- }
-
- if (!wiphy_ext_feature_isset(&rdev->wiphy,
- NL80211_EXT_FEATURE_CQM_RSSI_LIST))
- return -EOPNOTSUPP;
-
if (n_thresholds == 1 && thresholds[0] == 0) /* Disabling */
n_thresholds = 0;
old = wiphy_dereference(wdev->wiphy, wdev->cqm_config);
+ /* if already disabled just succeed */
+ if (!n_thresholds && !old)
+ return 0;
+
+ if (n_thresholds > 1) {
+ if (!wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_CQM_RSSI_LIST) ||
+ !rdev->ops->set_cqm_rssi_range_config)
+ return -EOPNOTSUPP;
+ } else {
+ if (!rdev->ops->set_cqm_rssi_config)
+ return -EOPNOTSUPP;
+ }
+
if (n_thresholds) {
cqm_config = kzalloc(struct_size(cqm_config, rssi_thresholds,
n_thresholds),
@@ -12894,13 +12897,26 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
memcpy(cqm_config->rssi_thresholds, thresholds,
flex_array_size(cqm_config, rssi_thresholds,
n_thresholds));
+ cqm_config->use_range_api = n_thresholds > 1 ||
+ !rdev->ops->set_cqm_rssi_config;
rcu_assign_pointer(wdev->cqm_config, cqm_config);
+
+ if (cqm_config->use_range_api)
+ err = cfg80211_cqm_rssi_update(rdev, dev, cqm_config);
+ else
+ err = rdev_set_cqm_rssi_config(rdev, dev,
+ thresholds[0],
+ hysteresis);
} else {
RCU_INIT_POINTER(wdev->cqm_config, NULL);
+ /* if enabled via the range API, also disable via the range API */
+ if (old->use_range_api)
+ err = rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0);
+ else
+ err = rdev_set_cqm_rssi_config(rdev, dev, 0, 0);
}
- err = cfg80211_cqm_rssi_update(rdev, dev, cqm_config);
if (err) {
rcu_assign_pointer(wdev->cqm_config, old);
kfree_rcu(cqm_config, rcu_head);
@@ -19009,10 +19025,11 @@ void cfg80211_cqm_rssi_notify_work(struct wiphy *wiphy, struct wiphy_work *work)
s32 rssi_level;
cqm_config = wiphy_dereference(wdev->wiphy, wdev->cqm_config);
- if (!wdev->cqm_config)
+ if (!cqm_config)
return;
- cfg80211_cqm_rssi_update(rdev, wdev->netdev, cqm_config);
+ if (cqm_config->use_range_api)
+ cfg80211_cqm_rssi_update(rdev, wdev->netdev, cqm_config);
rssi_level = cqm_config->last_rssi_event_value;
rssi_event = cqm_config->last_rssi_event_type;
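
The nl80211 change records, at enable time, whether the range API or the single-threshold API configured CQM, and then routes the disable path through the same backend. A small sketch of that record-the-backend pattern (names are illustrative, not the cfg80211 ones):

    #include <stdbool.h>
    #include <stdio.h>

    static int set_single(int thr, int hyst) { (void)thr; (void)hyst; return 1; }
    static int set_range(int low, int high)  { (void)low; (void)high; return 2; }

    struct ops {
        int (*set_single)(int thr, int hyst);
        int (*set_range)(int low, int high);
    };

    struct state { bool use_range_api; }; /* mirrors cqm_config->use_range_api */

    static int cqm_disable(const struct ops *ops, const struct state *st)
    {
        /* tear down through the same backend that enabled the feature */
        return st->use_range_api ? ops->set_range(0, 0)
                                 : ops->set_single(0, 0);
    }

    int main(void)
    {
        struct ops ops = { set_single, set_range };
        struct state st = { .use_range_api = true };

        printf("backend %d\n", cqm_disable(&ops, &st)); /* prints 2 */
        return 0;
    }
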
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index ae9f8cb611f6..3da0b52f308d 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -947,7 +947,7 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
rcu_read_lock();
if (xsk_check_common(xs))
- goto skip_tx;
+ goto out;
pool = xs->pool;
@@ -959,12 +959,11 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
xsk_generic_xmit(sk);
}
-skip_tx:
if (xs->rx && !xskq_prod_is_empty(xs->rx))
mask |= EPOLLIN | EPOLLRDNORM;
if (xs->tx && xsk_tx_writeable(xs))
mask |= EPOLLOUT | EPOLLWRNORM;
-
+out:
rcu_read_unlock();
return mask;
}
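
The xsk_poll() fix makes failed common checks skip the readiness computation entirely, instead of only the TX half, so a dead socket no longer reports EPOLLIN. A minimal sketch of the corrected control flow:

    #include <stdio.h>
    #include <sys/epoll.h>

    static unsigned int poll_mask(int ok, int rx_ready, int tx_ready)
    {
        unsigned int mask = 0;

        if (!ok)
            goto out;          /* previously only the TX half was skipped */
        if (rx_ready)
            mask |= EPOLLIN;
        if (tx_ready)
            mask |= EPOLLOUT;
    out:
        return mask;
    }

    int main(void)
    {
        printf("%u\n", poll_mask(0, 1, 1)); /* 0: no stale readiness bits */
        return 0;
    }
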
diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
index d83ba5d8f3f4..f27d552aec43 100755
--- a/scripts/checkstack.pl
+++ b/scripts/checkstack.pl
@@ -138,15 +138,11 @@ $total_size = 0;
while (my $line = <STDIN>) {
if ($line =~ m/$funcre/) {
$func = $1;
- next if $line !~ m/^($xs*)/;
+ next if $line !~ m/^($x*)/;
if ($total_size > $min_stack) {
push @stack, "$intro$total_size\n";
}
-
- $addr = $1;
- $addr =~ s/ /0/g;
- $addr = "0x$addr";
-
+ $addr = "0x$1";
$intro = "$addr $func [$file]:";
my $padlen = 56 - length($intro);
while ($padlen > 0) {
diff --git a/scripts/dtc/dt-extract-compatibles b/scripts/dtc/dt-extract-compatibles
index bd07477dd144..5ffb2364409b 100755
--- a/scripts/dtc/dt-extract-compatibles
+++ b/scripts/dtc/dt-extract-compatibles
@@ -1,8 +1,8 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-only
+import fnmatch
import os
-import glob
import re
import argparse
@@ -81,10 +81,20 @@ def print_compat(filename, compatibles):
else:
print(*compatibles, sep='\n')
+def glob_without_symlinks(root, glob):
+ for path, dirs, files in os.walk(root):
+ # Ignore hidden directories
+ for d in dirs:
+ if fnmatch.fnmatch(d, ".*"):
+ dirs.remove(d)
+ for f in files:
+ if fnmatch.fnmatch(f, glob):
+ yield os.path.join(path, f)
+
def files_to_parse(path_args):
for f in path_args:
if os.path.isdir(f):
- for filename in glob.iglob(f + "/**/*.c", recursive=True):
+ for filename in glob_without_symlinks(f, "*.c"):
yield filename
else:
yield f
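
glob.iglob() follows directory symlinks during a recursive walk, while os.walk() does not by default, which is what the new helper relies on. Roughly the same symlink-safe walk can be expressed in C with nftw(3) and FTW_PHYS; a hedged, runnable sketch:

    #define _XOPEN_SOURCE 500
    #include <ftw.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/stat.h>

    static int visit(const char *path, const struct stat *sb,
                     int typeflag, struct FTW *ftwbuf)
    {
        size_t len = strlen(path);

        (void)sb; (void)ftwbuf;
        if (typeflag == FTW_F && len > 2 && !strcmp(path + len - 2, ".c"))
            puts(path);        /* regular *.c files only */
        return 0;              /* keep walking */
    }

    int main(int argc, char **argv)
    {
        /* FTW_PHYS: do not follow symbolic links */
        return nftw(argc > 1 ? argv[1] : ".", visit, 16, FTW_PHYS) ? 1 : 0;
    }
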
diff --git a/scripts/gcc-plugins/randomize_layout_plugin.c b/scripts/gcc-plugins/randomize_layout_plugin.c
index 910bd21d08f4..746ff2d272f2 100644
--- a/scripts/gcc-plugins/randomize_layout_plugin.c
+++ b/scripts/gcc-plugins/randomize_layout_plugin.c
@@ -339,8 +339,7 @@ static int relayout_struct(tree type)
/*
* enforce that we don't randomize the layout of the last
- * element of a struct if it's a 0 or 1-length array
- * or a proper flexible array
+ * element of a struct if it's a proper flexible array
*/
if (is_flexible_array(newtree[num_fields - 1])) {
has_flexarray = true;
diff --git a/scripts/gdb/linux/device.py b/scripts/gdb/linux/device.py
index 16376c5cfec6..0eabc5f4f8ca 100644
--- a/scripts/gdb/linux/device.py
+++ b/scripts/gdb/linux/device.py
@@ -36,26 +36,26 @@ def for_each_bus():
for kobj in kset_for_each_object(gdb.parse_and_eval('bus_kset')):
subsys = container_of(kobj, kset_type.get_type().pointer(), 'kobj')
subsys_priv = container_of(subsys, subsys_private_type.get_type().pointer(), 'subsys')
- yield subsys_priv['bus']
+ yield subsys_priv
def for_each_class():
for kobj in kset_for_each_object(gdb.parse_and_eval('class_kset')):
subsys = container_of(kobj, kset_type.get_type().pointer(), 'kobj')
subsys_priv = container_of(subsys, subsys_private_type.get_type().pointer(), 'subsys')
- yield subsys_priv['class']
+ yield subsys_priv
def get_bus_by_name(name):
for item in for_each_bus():
- if item['name'].string() == name:
+ if item['bus']['name'].string() == name:
return item
raise gdb.GdbError("Can't find bus type {!r}".format(name))
def get_class_by_name(name):
for item in for_each_class():
- if item['name'].string() == name:
+ if item['class']['name'].string() == name:
return item
raise gdb.GdbError("Can't find device class {!r}".format(name))
@@ -70,13 +70,13 @@ def klist_for_each(klist):
def bus_for_each_device(bus):
- for kn in klist_for_each(bus['p']['klist_devices']):
+ for kn in klist_for_each(bus['klist_devices']):
dp = container_of(kn, device_private_type.get_type().pointer(), 'knode_bus')
yield dp['device']
def class_for_each_device(cls):
- for kn in klist_for_each(cls['p']['klist_devices']):
+ for kn in klist_for_each(cls['klist_devices']):
dp = container_of(kn, device_private_type.get_type().pointer(), 'knode_class')
yield dp['device']
@@ -103,7 +103,7 @@ class LxDeviceListBus(gdb.Command):
def invoke(self, arg, from_tty):
if not arg:
for bus in for_each_bus():
- gdb.write('bus {}:\t{}\n'.format(bus['name'].string(), bus))
+ gdb.write('bus {}:\t{}\n'.format(bus['bus']['name'].string(), bus))
for dev in bus_for_each_device(bus):
_show_device(dev, level=1)
else:
@@ -123,7 +123,7 @@ class LxDeviceListClass(gdb.Command):
def invoke(self, arg, from_tty):
if not arg:
for cls in for_each_class():
- gdb.write("class {}:\t{}\n".format(cls['name'].string(), cls))
+ gdb.write("class {}:\t{}\n".format(cls['class']['name'].string(), cls))
for dev in class_for_each_device(cls):
_show_device(dev, level=1)
else:
diff --git a/scripts/gdb/linux/tasks.py b/scripts/gdb/linux/tasks.py
index 17ec19e9b5bf..aa5ab6251f76 100644
--- a/scripts/gdb/linux/tasks.py
+++ b/scripts/gdb/linux/tasks.py
@@ -13,7 +13,7 @@
import gdb
-from linux import utils
+from linux import utils, lists
task_type = utils.CachedType("struct task_struct")
@@ -22,19 +22,15 @@ task_type = utils.CachedType("struct task_struct")
def task_lists():
task_ptr_type = task_type.get_type().pointer()
init_task = gdb.parse_and_eval("init_task").address
- t = g = init_task
+ t = init_task
while True:
- while True:
- yield t
+ thread_head = t['signal']['thread_head']
+ for thread in lists.list_for_each_entry(thread_head, task_ptr_type, 'thread_node'):
+ yield thread
- t = utils.container_of(t['thread_group']['next'],
- task_ptr_type, "thread_group")
- if t == g:
- break
-
- t = g = utils.container_of(g['tasks']['next'],
- task_ptr_type, "tasks")
+ t = utils.container_of(t['tasks']['next'],
+ task_ptr_type, "tasks")
if t == init_task:
return
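
The updated script walks each process's signal->thread_head list via list_for_each_entry(), i.e. it recovers a task_struct from an embedded list node. The same container_of technique in a self-contained userspace C sketch (struct and field names invented):

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next; };

    /* recover the containing struct from an embedded member pointer */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct task { const char *comm; struct list_head thread_node; };

    int main(void)
    {
        struct task t2 = { "worker", { NULL } };
        struct task t1 = { "leader", { &t2.thread_node } };

        for (struct list_head *p = &t1.thread_node; p; p = p->next) {
            struct task *t = container_of(p, struct task, thread_node);

            printf("%s\n", t->comm);
        }
        return 0;
    }
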
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 20bb2d7c8d4b..6d0c9c37796c 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -253,6 +253,7 @@ static const char * const snd_pcm_state_names[] = {
STATE(DRAINING),
STATE(PAUSED),
STATE(SUSPENDED),
+ STATE(DISCONNECTED),
};
static const char * const snd_pcm_access_names[] = {
diff --git a/sound/drivers/pcmtest.c b/sound/drivers/pcmtest.c
index b59b78a09224..b8bff5522bce 100644
--- a/sound/drivers/pcmtest.c
+++ b/sound/drivers/pcmtest.c
@@ -397,7 +397,6 @@ static int snd_pcmtst_pcm_close(struct snd_pcm_substream *substream)
struct pcmtst_buf_iter *v_iter = substream->runtime->private_data;
timer_shutdown_sync(&v_iter->timer_instance);
- v_iter->substream = NULL;
playback_capture_test = !v_iter->is_buf_corrupted;
kfree(v_iter);
return 0;
@@ -435,6 +434,7 @@ static int snd_pcmtst_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
// We can't call timer_shutdown_sync here, as it is forbidden to sleep here
v_iter->suspend = true;
+ timer_delete(&v_iter->timer_instance);
break;
}
@@ -512,12 +512,22 @@ static int snd_pcmtst_ioctl(struct snd_pcm_substream *substream, unsigned int cm
return snd_pcm_lib_ioctl(substream, cmd, arg);
}
+static int snd_pcmtst_sync_stop(struct snd_pcm_substream *substream)
+{
+ struct pcmtst_buf_iter *v_iter = substream->runtime->private_data;
+
+ timer_delete_sync(&v_iter->timer_instance);
+
+ return 0;
+}
+
static const struct snd_pcm_ops snd_pcmtst_playback_ops = {
.open = snd_pcmtst_pcm_open,
.close = snd_pcmtst_pcm_close,
.trigger = snd_pcmtst_pcm_trigger,
.hw_params = snd_pcmtst_pcm_hw_params,
.ioctl = snd_pcmtst_ioctl,
+ .sync_stop = snd_pcmtst_sync_stop,
.hw_free = snd_pcmtst_pcm_hw_free,
.prepare = snd_pcmtst_pcm_prepare,
.pointer = snd_pcmtst_pcm_pointer,
@@ -530,6 +540,7 @@ static const struct snd_pcm_ops snd_pcmtst_capture_ops = {
.hw_params = snd_pcmtst_pcm_hw_params,
.hw_free = snd_pcmtst_pcm_hw_free,
.ioctl = snd_pcmtst_ioctl,
+ .sync_stop = snd_pcmtst_sync_stop,
.prepare = snd_pcmtst_pcm_prepare,
.pointer = snd_pcmtst_pcm_pointer,
};
diff --git a/sound/hda/intel-nhlt.c b/sound/hda/intel-nhlt.c
index 2c4dfc0b7e34..696a958d93e9 100644
--- a/sound/hda/intel-nhlt.c
+++ b/sound/hda/intel-nhlt.c
@@ -238,7 +238,7 @@ EXPORT_SYMBOL(intel_nhlt_ssp_mclk_mask);
static struct nhlt_specific_cfg *
nhlt_get_specific_cfg(struct device *dev, struct nhlt_fmt *fmt, u8 num_ch,
- u32 rate, u8 vbps, u8 bps)
+ u32 rate, u8 vbps, u8 bps, bool ignore_vbps)
{
struct nhlt_fmt_cfg *cfg = fmt->fmt_config;
struct wav_fmt *wfmt;
@@ -255,8 +255,12 @@ nhlt_get_specific_cfg(struct device *dev, struct nhlt_fmt *fmt, u8 num_ch,
dev_dbg(dev, "Endpoint format: ch=%d fmt=%d/%d rate=%d\n",
wfmt->channels, _vbps, _bps, wfmt->samples_per_sec);
+ /*
+ * When looking for an exact configuration match, ignore the vbps
+ * from the NHLT table when ignore_vbps is true
+ */
if (wfmt->channels == num_ch && wfmt->samples_per_sec == rate &&
- vbps == _vbps && bps == _bps)
+ (ignore_vbps || vbps == _vbps) && bps == _bps)
return &cfg->config;
cfg = (struct nhlt_fmt_cfg *)(cfg->config.caps + cfg->config.size);
@@ -289,6 +293,7 @@ intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
{
struct nhlt_specific_cfg *cfg;
struct nhlt_endpoint *epnt;
+ bool ignore_vbps = false;
struct nhlt_fmt *fmt;
int i;
@@ -298,7 +303,26 @@ intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
dev_dbg(dev, "Looking for configuration:\n");
dev_dbg(dev, " vbus_id=%d link_type=%d dir=%d, dev_type=%d\n",
bus_id, link_type, dir, dev_type);
- dev_dbg(dev, " ch=%d fmt=%d/%d rate=%d\n", num_ch, vbps, bps, rate);
+ if (link_type == NHLT_LINK_DMIC && bps == 32 && (vbps == 24 || vbps == 32)) {
+ /*
+ * The DMIC hardware supports only one type of 32-bit sample
+ * size: 24-bit sampling on the MSB side, with bits [1:0]
+ * used to indicate the channel number.
+ * It has been observed that some NHLT tables specify the vbps
+ * as 32 while others use 24.
+ * The formats these variations describe are identical, and the
+ * hardware is configured and behaves the same way.
+ * Note: when the samples are assumed to be vbps=32, the 'noise'
+ * introduced by the lower two bits (the channel number) has no
+ * real-life impact on audio quality.
+ */
+ dev_dbg(dev,
+ " ch=%d fmt=%d rate=%d (vbps is ignored for DMIC 32bit format)\n",
+ num_ch, bps, rate);
+ ignore_vbps = true;
+ } else {
+ dev_dbg(dev, " ch=%d fmt=%d/%d rate=%d\n", num_ch, vbps, bps, rate);
+ }
dev_dbg(dev, "Endpoint count=%d\n", nhlt->endpoint_count);
epnt = (struct nhlt_endpoint *)nhlt->desc;
@@ -307,7 +331,8 @@ intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
if (nhlt_check_ep_match(dev, epnt, bus_id, link_type, dir, dev_type)) {
fmt = (struct nhlt_fmt *)(epnt->config.caps + epnt->config.size);
- cfg = nhlt_get_specific_cfg(dev, fmt, num_ch, rate, vbps, bps);
+ cfg = nhlt_get_specific_cfg(dev, fmt, num_ch, rate,
+ vbps, bps, ignore_vbps);
if (cfg)
return cfg;
}
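
With ignore_vbps set, vbps simply drops out of the comparison while all other fields must still match. A tiny sketch of the relaxed predicate (parameter names assumed):

    #include <stdbool.h>
    #include <stdio.h>

    static bool fmt_matches(unsigned int ch, unsigned int rate,
                            unsigned int vbps, unsigned int bps,
                            unsigned int want_ch, unsigned int want_rate,
                            unsigned int want_vbps, unsigned int want_bps,
                            bool ignore_vbps)
    {
        return ch == want_ch && rate == want_rate && bps == want_bps &&
               (ignore_vbps || vbps == want_vbps);
    }

    int main(void)
    {
        /* vbps 24 vs. 32 still matches once ignore_vbps is set */
        printf("%d\n", fmt_matches(2, 48000, 24, 32,
                                   2, 48000, 32, 32, true));
        return 0;
    }
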
diff --git a/sound/pci/hda/cs35l41_hda.c b/sound/pci/hda/cs35l41_hda.c
index b2db8091f0ed..cbd7d8badf91 100644
--- a/sound/pci/hda/cs35l41_hda.c
+++ b/sound/pci/hda/cs35l41_hda.c
@@ -504,7 +504,6 @@ static void cs35l41_shutdown_dsp(struct cs35l41_hda *cs35l41)
cs_dsp_stop(dsp);
cs_dsp_power_down(dsp);
- cs35l41->firmware_running = false;
dev_dbg(cs35l41->dev, "Unloaded Firmware\n");
}
@@ -550,7 +549,7 @@ static void cs35l41_hda_play_start(struct device *dev)
cs35l41->playback_started = true;
- if (cs35l41->firmware_running) {
+ if (cs35l41->cs_dsp.running) {
regmap_multi_reg_write(reg, cs35l41_hda_config_dsp,
ARRAY_SIZE(cs35l41_hda_config_dsp));
regmap_update_bits(reg, CS35L41_PWR_CTRL2,
@@ -580,7 +579,7 @@ static void cs35l41_mute(struct device *dev, bool mute)
regmap_multi_reg_write(reg, cs35l41_hda_mute, ARRAY_SIZE(cs35l41_hda_mute));
} else {
dev_dbg(dev, "Unmuting\n");
- if (cs35l41->firmware_running) {
+ if (cs35l41->cs_dsp.running) {
regmap_multi_reg_write(reg, cs35l41_hda_unmute_dsp,
ARRAY_SIZE(cs35l41_hda_unmute_dsp));
} else {
@@ -599,7 +598,7 @@ static void cs35l41_hda_play_done(struct device *dev)
dev_dbg(dev, "Play (Complete)\n");
cs35l41_global_enable(dev, reg, cs35l41->hw_cfg.bst_type, 1,
- cs35l41->firmware_running);
+ &cs35l41->cs_dsp);
cs35l41_mute(dev, false);
}
@@ -612,7 +611,7 @@ static void cs35l41_hda_pause_start(struct device *dev)
cs35l41_mute(dev, true);
cs35l41_global_enable(dev, reg, cs35l41->hw_cfg.bst_type, 0,
- cs35l41->firmware_running);
+ &cs35l41->cs_dsp);
}
static void cs35l41_hda_pause_done(struct device *dev)
@@ -625,7 +624,7 @@ static void cs35l41_hda_pause_done(struct device *dev)
regmap_update_bits(reg, CS35L41_PWR_CTRL2, CS35L41_AMP_EN_MASK, 0 << CS35L41_AMP_EN_SHIFT);
if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST)
regmap_write(reg, CS35L41_GPIO1_CTRL1, 0x00000001);
- if (cs35l41->firmware_running) {
+ if (cs35l41->cs_dsp.running) {
cs35l41_set_cspl_mbox_cmd(dev, reg, CSPL_MBOX_CMD_PAUSE);
regmap_update_bits(reg, CS35L41_PWR_CTRL2,
CS35L41_VMON_EN_MASK | CS35L41_IMON_EN_MASK,
@@ -675,7 +674,7 @@ static void cs35l41_hda_playback_hook(struct device *dev, int action)
break;
case HDA_GEN_PCM_ACT_CLOSE:
mutex_lock(&cs35l41->fw_mutex);
- if (!cs35l41->firmware_running && cs35l41->request_fw_load &&
+ if (!cs35l41->cs_dsp.running && cs35l41->request_fw_load &&
!cs35l41->fw_request_ongoing) {
dev_info(dev, "Requesting Firmware Load after HDA_GEN_PCM_ACT_CLOSE\n");
cs35l41->fw_request_ongoing = true;
@@ -761,10 +760,9 @@ static int cs35l41_verify_id(struct cs35l41_hda *cs35l41, unsigned int *regid, u
static int cs35l41_ready_for_reset(struct cs35l41_hda *cs35l41)
{
mutex_lock(&cs35l41->fw_mutex);
- if (cs35l41->firmware_running) {
+ if (cs35l41->cs_dsp.running) {
cs35l41->cs_dsp.running = false;
cs35l41->cs_dsp.booted = false;
- cs35l41->firmware_running = false;
}
regcache_mark_dirty(cs35l41->regmap);
mutex_unlock(&cs35l41->fw_mutex);
@@ -925,7 +923,7 @@ static int cs35l41_runtime_suspend(struct device *dev)
mutex_lock(&cs35l41->fw_mutex);
- if (cs35l41->firmware_running) {
+ if (cs35l41->cs_dsp.running) {
ret = cs35l41_enter_hibernate(cs35l41->dev, cs35l41->regmap,
cs35l41->hw_cfg.bst_type);
if (ret)
@@ -960,7 +958,7 @@ static int cs35l41_runtime_resume(struct device *dev)
regcache_cache_only(cs35l41->regmap, false);
- if (cs35l41->firmware_running) {
+ if (cs35l41->cs_dsp.running) {
ret = cs35l41_exit_hibernate(cs35l41->dev, cs35l41->regmap);
if (ret) {
dev_warn(cs35l41->dev, "Unable to exit Hibernate.");
@@ -1052,8 +1050,6 @@ static int cs35l41_smart_amp(struct cs35l41_hda *cs35l41)
goto clean_dsp;
}
- cs35l41->firmware_running = true;
-
return 0;
clean_dsp:
@@ -1063,10 +1059,10 @@ clean_dsp:
static void cs35l41_load_firmware(struct cs35l41_hda *cs35l41, bool load)
{
- if (cs35l41->firmware_running && !load) {
+ if (cs35l41->cs_dsp.running && !load) {
dev_dbg(cs35l41->dev, "Unloading Firmware\n");
cs35l41_shutdown_dsp(cs35l41);
- } else if (!cs35l41->firmware_running && load) {
+ } else if (!cs35l41->cs_dsp.running && load) {
dev_dbg(cs35l41->dev, "Loading Firmware\n");
cs35l41_smart_amp(cs35l41);
} else {
@@ -1346,7 +1342,7 @@ static int cs35l41_hda_bind(struct device *dev, struct device *master, void *mas
cs35l41->acpi_subsystem_id, cs35l41->hw_cfg.bst_type,
cs35l41->hw_cfg.gpio1.func == CS35l41_VSPK_SWITCH,
cs35l41->hw_cfg.spk_pos ? 'R' : 'L',
- cs35l41->firmware_running, cs35l41->speaker_id);
+ cs35l41->cs_dsp.running, cs35l41->speaker_id);
return ret;
}
diff --git a/sound/pci/hda/cs35l56_hda_spi.c b/sound/pci/hda/cs35l56_hda_spi.c
index 756aec342eab..27d7fbc56b4c 100644
--- a/sound/pci/hda/cs35l56_hda_spi.c
+++ b/sound/pci/hda/cs35l56_hda_spi.c
@@ -21,6 +21,10 @@ static int cs35l56_hda_spi_probe(struct spi_device *spi)
return -ENOMEM;
cs35l56->base.dev = &spi->dev;
+
+#ifdef CS35L56_WAKE_HOLD_TIME_US
+ cs35l56->base.can_hibernate = true;
+#endif
cs35l56->base.regmap = devm_regmap_init_spi(spi, &cs35l56_regmap_spi);
if (IS_ERR(cs35l56->base.regmap)) {
ret = PTR_ERR(cs35l56->base.regmap);
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index db90feb49c16..2d1df3654424 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2242,6 +2242,8 @@ static const struct snd_pci_quirk power_save_denylist[] = {
SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
/* https://bugs.launchpad.net/bugs/1821663 */
SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0),
+ /* KONTRON SinglePC may cause a stall at runtime resume */
+ SND_PCI_QUIRK(0x1734, 0x1232, "KONTRON SinglePC", 0),
{}
};
#endif /* CONFIG_PM */
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 5618b1d9bfd1..0377912e9264 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1987,6 +1987,7 @@ enum {
ALC887_FIXUP_ASUS_AUDIO,
ALC887_FIXUP_ASUS_HMIC,
ALCS1200A_FIXUP_MIC_VREF,
+ ALC888VD_FIXUP_MIC_100VREF,
};
static void alc889_fixup_coef(struct hda_codec *codec,
@@ -2540,6 +2541,13 @@ static const struct hda_fixup alc882_fixups[] = {
{}
}
},
+ [ALC888VD_FIXUP_MIC_100VREF] = {
+ .type = HDA_FIXUP_PINCTLS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x18, PIN_VREF100 }, /* headset mic */
+ {}
+ }
+ },
};
static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2609,6 +2617,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
+ SND_PCI_QUIRK(0x10ec, 0x12d8, "iBase Elo Touch", ALC888VD_FIXUP_MIC_100VREF),
SND_PCI_QUIRK(0x13fe, 0x1009, "Advantech MIT-W101", ALC886_FIXUP_EAPD),
SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
@@ -3256,6 +3265,7 @@ static void alc_disable_headset_jack_key(struct hda_codec *codec)
case 0x10ec0230:
case 0x10ec0236:
case 0x10ec0256:
+ case 0x10ec0257:
case 0x19e58326:
alc_write_coef_idx(codec, 0x48, 0x0);
alc_update_coef_idx(codec, 0x49, 0x0045, 0x0);
@@ -3285,6 +3295,7 @@ static void alc_enable_headset_jack_key(struct hda_codec *codec)
case 0x10ec0230:
case 0x10ec0236:
case 0x10ec0256:
+ case 0x10ec0257:
case 0x19e58326:
alc_write_coef_idx(codec, 0x48, 0xd011);
alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
@@ -6496,6 +6507,7 @@ static void alc_combo_jack_hp_jd_restart(struct hda_codec *codec)
case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
+ case 0x10ec0257:
case 0x19e58326:
alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
@@ -7249,6 +7261,7 @@ enum {
ALC290_FIXUP_SUBWOOFER_HSJACK,
ALC269_FIXUP_THINKPAD_ACPI,
ALC269_FIXUP_DMIC_THINKPAD_ACPI,
+ ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO,
ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
ALC255_FIXUP_ASUS_MIC_NO_PRESENCE,
ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
@@ -7601,6 +7614,14 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc269_fixup_pincfg_U7x7_headset_mic,
},
+ [ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x18, 0x03a19020 }, /* headset mic */
+ { 0x1b, 0x90170150 }, /* speaker */
+ { }
+ },
+ },
[ALC269_FIXUP_AMIC] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
@@ -9684,6 +9705,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK),
SND_PCI_QUIRK(0x1028, 0x0b37, "Dell Inspiron 16 Plus 7620 2-in-1", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS),
SND_PCI_QUIRK(0x1028, 0x0b71, "Dell Inspiron 16 Plus 7620", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS),
+ SND_PCI_QUIRK(0x1028, 0x0beb, "Dell XPS 15 9530 (2023)", ALC289_FIXUP_DELL_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1028, 0x0c03, "Dell Precision 5340", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0c19, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS),
SND_PCI_QUIRK(0x1028, 0x0c1a, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS),
@@ -9942,6 +9964,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x17f3, "ROG Ally RC71L_RC71L", ALC294_FIXUP_ASUS_ALLY),
SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x18d3, "ASUS UM3504DA", ALC294_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x194e, "ASUS UX563FD", ALC294_FIXUP_ASUS_HPE),
SND_PCI_QUIRK(0x1043, 0x1970, "ASUS UX550VE", ALC289_FIXUP_ASUS_GA401),
@@ -10183,6 +10206,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x387d, "Yoga S780-16 pro Quad AAC", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x387e, "Yoga S780-16 pro Quad YC", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x3881, "YB9 dual power mode2 YC", ALC287_FIXUP_TAS2781_I2C),
+ SND_PCI_QUIRK(0x17aa, 0x3882, "Lenovo Yoga Pro 7 14APH8", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
SND_PCI_QUIRK(0x17aa, 0x3884, "Y780 YG DUAL", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x3886, "Y780 VECO DUAL", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38a7, "Y780P AMD YG dual", ALC287_FIXUP_TAS2781_I2C),
@@ -10244,11 +10268,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
+ SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO),
SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC295_FIXUP_CHROME_BOOK),
SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0xf111, 0x0005, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
#if 0
/* Below is a quirk table taken from the old code.
@@ -12174,6 +12201,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", ALC897_FIXUP_HEADSET_MIC_PIN),
SND_PCI_QUIRK(0x17aa, 0x3321, "Lenovo ThinkCentre M70 Gen4", ALC897_FIXUP_HEADSET_MIC_PIN),
SND_PCI_QUIRK(0x17aa, 0x331b, "Lenovo ThinkCentre M90 Gen4", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x3364, "Lenovo ThinkCentre M90 Gen5", ALC897_FIXUP_HEADSET_MIC_PIN),
SND_PCI_QUIRK(0x17aa, 0x3742, "Lenovo TianYi510Pro-14IOB", ALC897_FIXUP_HEADSET_MIC_PIN2),
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
diff --git a/sound/soc/amd/acp-config.c b/sound/soc/amd/acp-config.c
index 20cee7104c2b..3bc4b2e41650 100644
--- a/sound/soc/amd/acp-config.c
+++ b/sound/soc/amd/acp-config.c
@@ -111,6 +111,20 @@ static const struct config_entry config_table[] = {
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "HUAWEI"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "HVY-WXX9"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "M1010"),
+ },
+ },
+ {}
+ },
+ },
+ {
+ .flags = FLAG_AMD_LEGACY,
+ .device = ACP_PCI_DEV_ID,
+ .dmi_table = (const struct dmi_system_id []) {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "HUAWEI"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "HVY-WXX9"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "M1020"),
},
},
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
index 15a864dcd7bd..d83cb6e4c62a 100644
--- a/sound/soc/amd/yc/acp6x-mach.c
+++ b/sound/soc/amd/yc/acp6x-mach.c
@@ -286,6 +286,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
{
.driver_data = &acp6x_card,
.matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E1504FA"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Micro-Star International Co., Ltd."),
DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 15 B7ED"),
}
@@ -370,6 +377,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
{
.driver_data = &acp6x_card,
.matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+ DMI_MATCH(DMI_BOARD_NAME, "8B2F"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "MECHREVO"),
DMI_MATCH(DMI_BOARD_NAME, "MRID6"),
}
@@ -381,6 +395,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "pang12"),
}
},
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "System76"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "pang13"),
+ }
+ },
{}
};
diff --git a/sound/soc/codecs/cs35l41-lib.c b/sound/soc/codecs/cs35l41-lib.c
index 4569e4f7cf7e..e9993a39f7d0 100644
--- a/sound/soc/codecs/cs35l41-lib.c
+++ b/sound/soc/codecs/cs35l41-lib.c
@@ -16,6 +16,8 @@
#include <sound/cs35l41.h>
+#define CS35L41_FIRMWARE_OLD_VERSION 0x001C00 /* v0.28.0 */
+
static const struct reg_default cs35l41_reg[] = {
{ CS35L41_PWR_CTRL1, 0x00000000 },
{ CS35L41_PWR_CTRL2, 0x00000000 },
@@ -1214,7 +1216,7 @@ EXPORT_SYMBOL_GPL(cs35l41_safe_reset);
* the PLL Lock interrupt, in the IRQ handler.
*/
int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l41_boost_type b_type,
- int enable, bool firmware_running)
+ int enable, struct cs_dsp *dsp)
{
int ret;
unsigned int gpio1_func, pad_control, pwr_ctrl1, pwr_ctrl3, int_status, pup_pdn_mask;
@@ -1309,7 +1311,7 @@ int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l4
}
regmap_write(regmap, CS35L41_IRQ1_STATUS1, CS35L41_PUP_DONE_MASK);
- if (firmware_running)
+ if (dsp->running && dsp->fw_id_version > CS35L41_FIRMWARE_OLD_VERSION)
ret = cs35l41_set_cspl_mbox_cmd(dev, regmap,
CSPL_MBOX_CMD_SPK_OUT_ENABLE);
else
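
The mailbox command is now gated on both the DSP running and its firmware being newer than v0.28.0, rather than on a cached boolean. A minimal sketch of that version-gating check (struct layout assumed; the constant is taken from the patch):

    #include <stdbool.h>
    #include <stdio.h>

    #define FW_OLD_VERSION 0x001C00 /* v0.28.0, from the patch */

    struct dsp { bool running; unsigned int fw_id_version; };

    static bool can_use_mbox(const struct dsp *d)
    {
        /* both conditions must hold, as in cs35l41_global_enable() */
        return d->running && d->fw_id_version > FW_OLD_VERSION;
    }

    int main(void)
    {
        struct dsp old = { true, 0x001C00 }, new = { true, 0x001D00 };

        printf("%d %d\n", can_use_mbox(&old), can_use_mbox(&new));
        return 0;
    }
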
diff --git a/sound/soc/codecs/cs35l41.c b/sound/soc/codecs/cs35l41.c
index d0e9128ac6d0..dfb4ce53491b 100644
--- a/sound/soc/codecs/cs35l41.c
+++ b/sound/soc/codecs/cs35l41.c
@@ -519,11 +519,11 @@ static int cs35l41_main_amp_event(struct snd_soc_dapm_widget *w,
ARRAY_SIZE(cs35l41_pup_patch));
ret = cs35l41_global_enable(cs35l41->dev, cs35l41->regmap, cs35l41->hw_cfg.bst_type,
- 1, cs35l41->dsp.cs_dsp.running);
+ 1, &cs35l41->dsp.cs_dsp);
break;
case SND_SOC_DAPM_POST_PMD:
ret = cs35l41_global_enable(cs35l41->dev, cs35l41->regmap, cs35l41->hw_cfg.bst_type,
- 0, cs35l41->dsp.cs_dsp.running);
+ 0, &cs35l41->dsp.cs_dsp);
regmap_multi_reg_write_bypassed(cs35l41->regmap,
cs35l41_pdn_patch,
diff --git a/sound/soc/codecs/cs43130.c b/sound/soc/codecs/cs43130.c
index 0b40fdfb1825..d8ec325b9cc9 100644
--- a/sound/soc/codecs/cs43130.c
+++ b/sound/soc/codecs/cs43130.c
@@ -578,7 +578,7 @@ static int cs43130_set_sp_fmt(int dai_id, unsigned int bitwidth_sclk,
break;
case SND_SOC_DAIFMT_LEFT_J:
hi_size = bitwidth_sclk;
- frm_delay = 2;
+ frm_delay = 0;
frm_phase = 1;
break;
case SND_SOC_DAIFMT_DSP_A:
@@ -1682,7 +1682,7 @@ static ssize_t hpload_dc_r_show(struct device *dev,
return cs43130_show_dc(dev, buf, HP_RIGHT);
}
-static u16 const cs43130_ac_freq[CS43130_AC_FREQ] = {
+static const u16 cs43130_ac_freq[CS43130_AC_FREQ] = {
24,
43,
93,
@@ -2362,7 +2362,7 @@ static const struct regmap_config cs43130_regmap = {
.use_single_write = true,
};
-static u16 const cs43130_dc_threshold[CS43130_DC_THRESHOLD] = {
+static const u16 cs43130_dc_threshold[CS43130_DC_THRESHOLD] = {
50,
120,
};
diff --git a/sound/soc/codecs/da7219-aad.c b/sound/soc/codecs/da7219-aad.c
index 4c4405942779..6bc068cdcbe2 100644
--- a/sound/soc/codecs/da7219-aad.c
+++ b/sound/soc/codecs/da7219-aad.c
@@ -696,7 +696,7 @@ static struct da7219_aad_pdata *da7219_aad_fw_to_pdata(struct device *dev)
aad_pdata->mic_det_thr =
da7219_aad_fw_mic_det_thr(dev, fw_val32);
else
- aad_pdata->mic_det_thr = DA7219_AAD_MIC_DET_THR_500_OHMS;
+ aad_pdata->mic_det_thr = DA7219_AAD_MIC_DET_THR_200_OHMS;
if (fwnode_property_read_u32(aad_np, "dlg,jack-ins-deb", &fw_val32) >= 0)
aad_pdata->jack_ins_deb =
diff --git a/sound/soc/codecs/hdac_hda.c b/sound/soc/codecs/hdac_hda.c
index 355f30779a34..b075689db2dc 100644
--- a/sound/soc/codecs/hdac_hda.c
+++ b/sound/soc/codecs/hdac_hda.c
@@ -132,6 +132,9 @@ static struct snd_soc_dai_driver hdac_hda_dais[] = {
.sig_bits = 24,
},
},
+};
+
+static struct snd_soc_dai_driver hdac_hda_hdmi_dais[] = {
{
.id = HDAC_HDMI_0_DAI_ID,
.name = "intel-hdmi-hifi1",
@@ -607,8 +610,16 @@ static const struct snd_soc_component_driver hdac_hda_codec = {
.endianness = 1,
};
+static const struct snd_soc_component_driver hdac_hda_hdmi_codec = {
+ .probe = hdac_hda_codec_probe,
+ .remove = hdac_hda_codec_remove,
+ .idle_bias_on = false,
+ .endianness = 1,
+};
+
static int hdac_hda_dev_probe(struct hdac_device *hdev)
{
+ struct hdac_hda_priv *hda_pvt = dev_get_drvdata(&hdev->dev);
struct hdac_ext_link *hlink;
int ret;
@@ -621,9 +632,15 @@ static int hdac_hda_dev_probe(struct hdac_device *hdev)
snd_hdac_ext_bus_link_get(hdev->bus, hlink);
/* ASoC specific initialization */
- ret = devm_snd_soc_register_component(&hdev->dev,
- &hdac_hda_codec, hdac_hda_dais,
- ARRAY_SIZE(hdac_hda_dais));
+ if (hda_pvt->need_display_power)
+ ret = devm_snd_soc_register_component(&hdev->dev,
+ &hdac_hda_hdmi_codec, hdac_hda_hdmi_dais,
+ ARRAY_SIZE(hdac_hda_hdmi_dais));
+ else
+ ret = devm_snd_soc_register_component(&hdev->dev,
+ &hdac_hda_codec, hdac_hda_dais,
+ ARRAY_SIZE(hdac_hda_dais));
+
if (ret < 0) {
dev_err(&hdev->dev, "failed to register HDA codec %d\n", ret);
return ret;
diff --git a/sound/soc/codecs/lpass-tx-macro.c b/sound/soc/codecs/lpass-tx-macro.c
index 82f9873ffada..124c2e144f33 100644
--- a/sound/soc/codecs/lpass-tx-macro.c
+++ b/sound/soc/codecs/lpass-tx-macro.c
@@ -2021,6 +2021,11 @@ static int tx_macro_probe(struct platform_device *pdev)
tx->dev = dev;
+ /* Set active_decimator default values */
+ tx->active_decimator[TX_MACRO_AIF1_CAP] = -1;
+ tx->active_decimator[TX_MACRO_AIF2_CAP] = -1;
+ tx->active_decimator[TX_MACRO_AIF3_CAP] = -1;
+
/* set MCLK and NPL rates */
clk_set_rate(tx->mclk, MCLK_FREQ);
clk_set_rate(tx->npl, MCLK_FREQ);
diff --git a/sound/soc/codecs/nau8822.c b/sound/soc/codecs/nau8822.c
index ff3024899f45..7199d734c79f 100644
--- a/sound/soc/codecs/nau8822.c
+++ b/sound/soc/codecs/nau8822.c
@@ -184,6 +184,7 @@ static int nau8822_eq_get(struct snd_kcontrol *kcontrol,
struct soc_bytes_ext *params = (void *)kcontrol->private_value;
int i, reg;
u16 reg_val, *val;
+ __be16 tmp;
val = (u16 *)ucontrol->value.bytes.data;
reg = NAU8822_REG_EQ1;
@@ -192,8 +193,8 @@ static int nau8822_eq_get(struct snd_kcontrol *kcontrol,
/* conversion of 16-bit integers between native CPU format
* and big endian format
*/
- reg_val = cpu_to_be16(reg_val);
- memcpy(val + i, &reg_val, sizeof(reg_val));
+ tmp = cpu_to_be16(reg_val);
+ memcpy(val + i, &tmp, sizeof(tmp));
}
return 0;
@@ -216,6 +217,7 @@ static int nau8822_eq_put(struct snd_kcontrol *kcontrol,
void *data;
u16 *val, value;
int i, reg, ret;
+ __be16 *tmp;
data = kmemdup(ucontrol->value.bytes.data,
params->max, GFP_KERNEL | GFP_DMA);
@@ -228,7 +230,8 @@ static int nau8822_eq_put(struct snd_kcontrol *kcontrol,
/* conversion of 16-bit integers between native CPU format
* and big endian format
*/
- value = be16_to_cpu(*(val + i));
+ tmp = (__be16 *)(val + i);
+ value = be16_to_cpup(tmp);
ret = snd_soc_component_write(component, reg + i, value);
if (ret) {
dev_err(component->dev,
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 7938b52d741d..a0d01d71d8b5 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -448,6 +448,7 @@ struct rt5645_priv {
struct regulator_bulk_data supplies[ARRAY_SIZE(rt5645_supply_names)];
struct rt5645_eq_param_s *eq_param;
struct timer_list btn_check_timer;
+ struct mutex jd_mutex;
int codec_type;
int sysclk;
@@ -3193,6 +3194,8 @@ static int rt5645_jack_detect(struct snd_soc_component *component, int jack_inse
rt5645_enable_push_button_irq(component, true);
}
} else {
+ if (rt5645->en_button_func)
+ rt5645_enable_push_button_irq(component, false);
snd_soc_dapm_disable_pin(dapm, "Mic Det Power");
snd_soc_dapm_sync(dapm);
rt5645->jack_type = SND_JACK_HEADPHONE;
@@ -3295,6 +3298,8 @@ static void rt5645_jack_detect_work(struct work_struct *work)
if (!rt5645->component)
return;
+ mutex_lock(&rt5645->jd_mutex);
+
switch (rt5645->pdata.jd_mode) {
case 0: /* Not using rt5645 JD */
if (rt5645->gpiod_hp_det) {
@@ -3321,7 +3326,7 @@ static void rt5645_jack_detect_work(struct work_struct *work)
if (!val && (rt5645->jack_type == 0)) { /* jack in */
report = rt5645_jack_detect(rt5645->component, 1);
- } else if (!val && rt5645->jack_type != 0) {
+ } else if (!val && rt5645->jack_type == SND_JACK_HEADSET) {
/* for push button and jack out */
btn_type = 0;
if (snd_soc_component_read(rt5645->component, RT5645_INT_IRQ_ST) & 0x4) {
@@ -3377,6 +3382,8 @@ static void rt5645_jack_detect_work(struct work_struct *work)
rt5645_jack_detect(rt5645->component, 0);
}
+ mutex_unlock(&rt5645->jd_mutex);
+
snd_soc_jack_report(rt5645->hp_jack, report, SND_JACK_HEADPHONE);
snd_soc_jack_report(rt5645->mic_jack, report, SND_JACK_MICROPHONE);
if (rt5645->en_button_func)
@@ -4150,6 +4157,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c)
}
timer_setup(&rt5645->btn_check_timer, rt5645_btn_check_callback, 0);
+ mutex_init(&rt5645->jd_mutex);
INIT_DELAYED_WORK(&rt5645->jack_detect_work, rt5645_jack_detect_work);
INIT_DELAYED_WORK(&rt5645->rcclock_work, rt5645_rcclock_work);
diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
index 044b6f604c09..260bac695b20 100644
--- a/sound/soc/codecs/wm8974.c
+++ b/sound/soc/codecs/wm8974.c
@@ -186,7 +186,7 @@ SOC_DAPM_SINGLE("PCM Playback Switch", WM8974_MONOMIX, 0, 1, 0),
/* Boost mixer */
static const struct snd_kcontrol_new wm8974_boost_mixer[] = {
-SOC_DAPM_SINGLE("Aux Switch", WM8974_INPPGA, 6, 1, 1),
+SOC_DAPM_SINGLE("PGA Switch", WM8974_INPPGA, 6, 1, 1),
};
/* Input PGA */
@@ -246,8 +246,8 @@ static const struct snd_soc_dapm_route wm8974_dapm_routes[] = {
/* Boost Mixer */
{"ADC", NULL, "Boost Mixer"},
- {"Boost Mixer", "Aux Switch", "Aux Input"},
- {"Boost Mixer", NULL, "Input PGA"},
+ {"Boost Mixer", NULL, "Aux Input"},
+ {"Boost Mixer", "PGA Switch", "Input PGA"},
{"Boost Mixer", NULL, "MICP"},
/* Input PGA */
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 236b12b69ae5..c01e31175015 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -1451,12 +1451,12 @@ static int wm_adsp_buffer_populate(struct wm_adsp_compr_buf *buf)
ret = wm_adsp_buffer_read(buf, caps->region_defs[i].base_offset,
&region->base_addr);
if (ret < 0)
- return ret;
+ goto err;
ret = wm_adsp_buffer_read(buf, caps->region_defs[i].size_offset,
&offset);
if (ret < 0)
- return ret;
+ goto err;
region->cumulative_size = offset;
@@ -1467,6 +1467,10 @@ static int wm_adsp_buffer_populate(struct wm_adsp_compr_buf *buf)
}
return 0;
+
+err:
+ kfree(buf->regions);
+ return ret;
}
static void wm_adsp_buffer_clear(struct wm_adsp_compr_buf *buf)
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 725c530a3636..be342ee03fb9 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -360,6 +360,7 @@ config SND_SOC_IMX_HDMI
config SND_SOC_IMX_RPMSG
tristate "SoC Audio support for i.MX boards with rpmsg"
depends on RPMSG
+ depends on OF && I2C
select SND_SOC_IMX_PCM_RPMSG
select SND_SOC_IMX_AUDIO_RPMSG
help
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index 79e7c6b98a75..32bbe5056a63 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -673,6 +673,20 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,
FSL_SAI_CR3_TRCE_MASK,
FSL_SAI_CR3_TRCE((dl_cfg[dl_cfg_idx].mask[tx] & trce_mask)));
+ /*
+ * If TERE and FSD_MSTR are enabled before the word width is configured,
+ * a "no frame sync clock" issue can occur, because the word width
+ * affects the generation of the frame sync clock.
+ *
+ * TERE is enabled earlier only in the i.MX8MP case due to a hardware
+ * limitation, so for this specific case FSD_MSTR must be disabled
+ * before configuring the word width and re-enabled afterwards.
+ */
+ if (sai->soc_data->mclk_with_tere && sai->mclk_direction_output &&
+ !sai->is_consumer_mode)
+ regmap_update_bits(sai->regmap, FSL_SAI_xCR4(tx, ofs),
+ FSL_SAI_CR4_FSD_MSTR, 0);
+
regmap_update_bits(sai->regmap, FSL_SAI_xCR4(tx, ofs),
FSL_SAI_CR4_SYWD_MASK | FSL_SAI_CR4_FRSZ_MASK |
FSL_SAI_CR4_CHMOD_MASK,
@@ -680,6 +694,13 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,
regmap_update_bits(sai->regmap, FSL_SAI_xCR5(tx, ofs),
FSL_SAI_CR5_WNW_MASK | FSL_SAI_CR5_W0W_MASK |
FSL_SAI_CR5_FBT_MASK, val_cr5);
+
+ /* Enable FSD_MSTR after configuring word width */
+ if (sai->soc_data->mclk_with_tere && sai->mclk_direction_output &&
+ !sai->is_consumer_mode)
+ regmap_update_bits(sai->regmap, FSL_SAI_xCR4(tx, ofs),
+ FSL_SAI_CR4_FSD_MSTR, FSL_SAI_CR4_FSD_MSTR);
+
regmap_write(sai->regmap, FSL_SAI_xMR(tx),
~0UL - ((1 << min(channels, slots)) - 1));
diff --git a/sound/soc/fsl/fsl_xcvr.c b/sound/soc/fsl/fsl_xcvr.c
index fa0a15263c66..f0fb33d719c2 100644
--- a/sound/soc/fsl/fsl_xcvr.c
+++ b/sound/soc/fsl/fsl_xcvr.c
@@ -358,7 +358,7 @@ static int fsl_xcvr_en_aud_pll(struct fsl_xcvr *xcvr, u32 freq)
struct device *dev = &xcvr->pdev->dev;
int ret;
- freq = xcvr->soc_data->spdif_only ? freq / 10 : freq;
+ freq = xcvr->soc_data->spdif_only ? freq / 5 : freq;
clk_disable_unprepare(xcvr->phy_clk);
ret = clk_set_rate(xcvr->phy_clk, freq);
if (ret < 0) {
@@ -409,11 +409,21 @@ static int fsl_xcvr_prepare(struct snd_pcm_substream *substream,
bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
u32 m_ctl = 0, v_ctl = 0;
u32 r = substream->runtime->rate, ch = substream->runtime->channels;
- u32 fout = 32 * r * ch * 10 * 2;
+ u32 fout = 32 * r * ch * 10;
int ret = 0;
switch (xcvr->mode) {
case FSL_XCVR_MODE_SPDIF:
+ if (xcvr->soc_data->spdif_only && tx) {
+ ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_TX_DPTH_CTRL_SET,
+ FSL_XCVR_TX_DPTH_CTRL_BYPASS_FEM,
+ FSL_XCVR_TX_DPTH_CTRL_BYPASS_FEM);
+ if (ret < 0) {
+ dev_err(dai->dev, "Failed to set bypass fem: %d\n", ret);
+ return ret;
+ }
+ }
+ fallthrough;
case FSL_XCVR_MODE_ARC:
if (tx) {
ret = fsl_xcvr_en_aud_pll(xcvr, fout);
diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c
index 6c6ef63cd5d9..6e172719c979 100644
--- a/sound/soc/intel/boards/skl_hda_dsp_generic.c
+++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c
@@ -154,6 +154,8 @@ static int skl_hda_fill_card_info(struct snd_soc_acpi_mach_params *mach_params)
card->dapm_widgets = skl_hda_widgets;
card->num_dapm_widgets = ARRAY_SIZE(skl_hda_widgets);
if (!ctx->idisp_codec) {
+ card->dapm_routes = &skl_hda_map[IDISP_ROUTE_COUNT];
+ num_route -= IDISP_ROUTE_COUNT;
for (i = 0; i < IDISP_DAI_COUNT; i++) {
skl_hda_be_dai_links[i].codecs = &snd_soc_dummy_dlc;
skl_hda_be_dai_links[i].num_codecs = 1;
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
index 3312ad8a563b..4e4284729773 100644
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -1546,7 +1546,7 @@ static int sof_card_dai_links_create(struct snd_soc_card *card)
{
struct device *dev = card->dev;
struct snd_soc_acpi_mach *mach = dev_get_platdata(card->dev);
- int sdw_be_num = 0, ssp_num = 0, dmic_num = 0, hdmi_num = 0, bt_num = 0;
+ int sdw_be_num = 0, ssp_num = 0, dmic_num = 0, bt_num = 0;
struct mc_private *ctx = snd_soc_card_get_drvdata(card);
struct snd_soc_acpi_mach_params *mach_params = &mach->mach_params;
const struct snd_soc_acpi_link_adr *adr_link = mach_params->links;
@@ -1564,6 +1564,7 @@ static int sof_card_dai_links_create(struct snd_soc_card *card)
char *codec_name, *codec_dai_name;
int i, j, be_id = 0;
int codec_index;
+ int hdmi_num;
int ret;
ret = get_dailink_info(dev, adr_link, &sdw_be_num, &codec_conf_num);
@@ -1584,14 +1585,13 @@ static int sof_card_dai_links_create(struct snd_soc_card *card)
ssp_num = hweight_long(ssp_mask);
}
- if (mach_params->codec_mask & IDISP_CODEC_MASK) {
+ if (mach_params->codec_mask & IDISP_CODEC_MASK)
ctx->hdmi.idisp_codec = true;
- if (sof_sdw_quirk & SOF_SDW_TGL_HDMI)
- hdmi_num = SOF_TGL_HDMI_COUNT;
- else
- hdmi_num = SOF_PRE_TGL_HDMI_COUNT;
- }
+ if (sof_sdw_quirk & SOF_SDW_TGL_HDMI)
+ hdmi_num = SOF_TGL_HDMI_COUNT;
+ else
+ hdmi_num = SOF_PRE_TGL_HDMI_COUNT;
/* enable dmic01 & dmic16k */
if (sof_sdw_quirk & SOF_SDW_PCH_DMIC || mach_params->dmic_num)
@@ -1601,7 +1601,8 @@ static int sof_card_dai_links_create(struct snd_soc_card *card)
bt_num = 1;
dev_dbg(dev, "sdw %d, ssp %d, dmic %d, hdmi %d, bt: %d\n",
- sdw_be_num, ssp_num, dmic_num, hdmi_num, bt_num);
+ sdw_be_num, ssp_num, dmic_num,
+ ctx->hdmi.idisp_codec ? hdmi_num : 0, bt_num);
/* allocate BE dailinks */
num_links = sdw_be_num + ssp_num + dmic_num + hdmi_num + bt_num;
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index d0c02e8a6785..174aae6e0398 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -240,8 +240,10 @@ static int skl_pcm_open(struct snd_pcm_substream *substream,
snd_pcm_set_sync(substream);
mconfig = skl_tplg_fe_get_cpr_module(dai, substream->stream);
- if (!mconfig)
+ if (!mconfig) {
+ kfree(dma_params);
return -EINVAL;
+ }
skl_tplg_d0i3_get(skl, mconfig->d0i3_caps);
@@ -1462,6 +1464,7 @@ int skl_platform_register(struct device *dev)
dais = krealloc(skl->dais, sizeof(skl_fe_dai) +
sizeof(skl_platform_dai), GFP_KERNEL);
if (!dais) {
+ kfree(skl->dais);
ret = -ENOMEM;
goto err;
}
@@ -1474,8 +1477,10 @@ int skl_platform_register(struct device *dev)
ret = devm_snd_soc_register_component(dev, &skl_component,
skl->dais, num_dais);
- if (ret)
+ if (ret) {
+ kfree(skl->dais);
dev_err(dev, "soc component registration failed %d\n", ret);
+ }
err:
return ret;
}
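
Both skylake fixes close the same class of leak: krealloc() returns NULL on failure and leaves the original allocation alive, so the caller must free the old buffer itself on the error path. A runnable userspace analogue with realloc():

    #include <stdlib.h>

    static int grow(int **vec, size_t n)
    {
        int *tmp = realloc(*vec, n * sizeof(**vec));

        if (!tmp) {
            free(*vec);   /* the old block would otherwise leak */
            *vec = NULL;
            return -1;
        }
        *vec = tmp;
        return 0;
    }

    int main(void)
    {
        int *v = NULL;

        if (grow(&v, 16))
            return 1;
        free(v);
        return 0;
    }
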
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c
index 7a425271b08b..fd9624ad5f72 100644
--- a/sound/soc/intel/skylake/skl-sst-ipc.c
+++ b/sound/soc/intel/skylake/skl-sst-ipc.c
@@ -1003,8 +1003,10 @@ int skl_ipc_get_large_config(struct sst_generic_ipc *ipc,
reply.size = (reply.header >> 32) & IPC_DATA_OFFSET_SZ_MASK;
buf = krealloc(reply.data, reply.size, GFP_KERNEL);
- if (!buf)
+ if (!buf) {
+ kfree(reply.data);
return -ENOMEM;
+ }
*payload = buf;
*bytes = reply.size;
diff --git a/sound/soc/qcom/sc8280xp.c b/sound/soc/qcom/sc8280xp.c
index d93b18f07be5..39cb0b889aff 100644
--- a/sound/soc/qcom/sc8280xp.c
+++ b/sound/soc/qcom/sc8280xp.c
@@ -27,6 +27,23 @@ struct sc8280xp_snd_data {
static int sc8280xp_snd_init(struct snd_soc_pcm_runtime *rtd)
{
struct sc8280xp_snd_data *data = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_card *card = rtd->card;
+
+ switch (cpu_dai->id) {
+ case WSA_CODEC_DMA_RX_0:
+ case WSA_CODEC_DMA_RX_1:
+ /*
+ * Limit the speaker digital volume to 0 dB; this can help
+ * prevent speaker damage to some extent when no active speaker
+ * protection is in place
+ */
+ snd_soc_limit_volume(card, "WSA_RX0 Digital Volume", 84);
+ snd_soc_limit_volume(card, "WSA_RX1 Digital Volume", 84);
+ break;
+ default:
+ break;
+ }
return qcom_snd_wcd_jack_setup(rtd, &data->jack, &data->jack_setup);
}
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index 55b009d3c681..2d25748ca706 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -661,7 +661,7 @@ int snd_soc_limit_volume(struct snd_soc_card *card,
kctl = snd_soc_card_get_kcontrol(card, name);
if (kctl) {
struct soc_mixer_control *mc = (struct soc_mixer_control *)kctl->private_value;
- if (max <= mc->max) {
+ if (max <= mc->max - mc->min) {
mc->platform_max = max;
ret = 0;
}
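
Control values written by userspace are offsets from mc->min, so the largest valid platform_max is (max - min), not max; this appears to be what lets the sc8280xp board file above pass 84 for a control whose raw range does not start at zero. A sketch of the corrected bound check:

    #include <stdio.h>

    /* values exposed to userspace are offsets from min */
    static int limit_ok(int min, int max, int limit)
    {
        return limit <= max - min;
    }

    int main(void)
    {
        /* e.g. a control with min = -84, max = 40 spans offsets 0..124 */
        printf("%d %d\n", limit_ok(-84, 40, 84), limit_ok(0, 40, 84));
        return 0;
    }
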
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 323e4d7b6adf..f6d1b2e11795 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -704,11 +704,6 @@ static int soc_pcm_clean(struct snd_soc_pcm_runtime *rtd,
if (snd_soc_dai_active(dai) == 0 &&
(dai->rate || dai->channels || dai->sample_bits))
soc_pcm_set_dai_params(dai, NULL);
-
- if (snd_soc_dai_stream_active(dai, substream->stream) == 0) {
- if (dai->driver->ops && !dai->driver->ops->mute_unmute_on_trigger)
- snd_soc_dai_digital_mute(dai, 1, substream->stream);
- }
}
}
@@ -947,8 +942,10 @@ static int soc_pcm_hw_clean(struct snd_soc_pcm_runtime *rtd,
if (snd_soc_dai_active(dai) == 1)
soc_pcm_set_dai_params(dai, NULL);
- if (snd_soc_dai_stream_active(dai, substream->stream) == 1)
- snd_soc_dai_digital_mute(dai, 1, substream->stream);
+ if (snd_soc_dai_stream_active(dai, substream->stream) == 1) {
+ if (dai->driver->ops && !dai->driver->ops->mute_unmute_on_trigger)
+ snd_soc_dai_digital_mute(dai, 1, substream->stream);
+ }
}
/* run the stream event */
diff --git a/sound/soc/sof/ipc3-topology.c b/sound/soc/sof/ipc3-topology.c
index ba4ef290b634..2c7a5e7a364c 100644
--- a/sound/soc/sof/ipc3-topology.c
+++ b/sound/soc/sof/ipc3-topology.c
@@ -493,6 +493,7 @@ static int sof_ipc3_widget_setup_comp_mixer(struct snd_sof_widget *swidget)
static int sof_ipc3_widget_setup_comp_pipeline(struct snd_sof_widget *swidget)
{
struct snd_soc_component *scomp = swidget->scomp;
+ struct snd_sof_pipeline *spipe = swidget->spipe;
struct sof_ipc_pipe_new *pipeline;
struct snd_sof_widget *comp_swidget;
int ret;
@@ -545,6 +546,7 @@ static int sof_ipc3_widget_setup_comp_pipeline(struct snd_sof_widget *swidget)
swidget->dynamic_pipeline_widget);
swidget->core = pipeline->core;
+ spipe->core_mask |= BIT(pipeline->core);
return 0;
diff --git a/sound/soc/sof/ipc4-control.c b/sound/soc/sof/ipc4-control.c
index 938efaceb81c..b4cdcec33e12 100644
--- a/sound/soc/sof/ipc4-control.c
+++ b/sound/soc/sof/ipc4-control.c
@@ -89,7 +89,7 @@ sof_ipc4_set_volume_data(struct snd_sof_dev *sdev, struct snd_sof_widget *swidge
struct sof_ipc4_control_data *cdata = scontrol->ipc_control_data;
struct sof_ipc4_gain *gain = swidget->private;
struct sof_ipc4_msg *msg = &cdata->msg;
- struct sof_ipc4_gain_data data;
+ struct sof_ipc4_gain_params params;
bool all_channels_equal = true;
u32 value;
int ret, i;
@@ -109,20 +109,20 @@ sof_ipc4_set_volume_data(struct snd_sof_dev *sdev, struct snd_sof_widget *swidge
*/
for (i = 0; i < scontrol->num_channels; i++) {
if (all_channels_equal) {
- data.channels = SOF_IPC4_GAIN_ALL_CHANNELS_MASK;
- data.init_val = cdata->chanv[0].value;
+ params.channels = SOF_IPC4_GAIN_ALL_CHANNELS_MASK;
+ params.init_val = cdata->chanv[0].value;
} else {
- data.channels = cdata->chanv[i].channel;
- data.init_val = cdata->chanv[i].value;
+ params.channels = cdata->chanv[i].channel;
+ params.init_val = cdata->chanv[i].value;
}
/* set curve type and duration from topology */
- data.curve_duration_l = gain->data.curve_duration_l;
- data.curve_duration_h = gain->data.curve_duration_h;
- data.curve_type = gain->data.curve_type;
+ params.curve_duration_l = gain->data.params.curve_duration_l;
+ params.curve_duration_h = gain->data.params.curve_duration_h;
+ params.curve_type = gain->data.params.curve_type;
- msg->data_ptr = &data;
- msg->data_size = sizeof(data);
+ msg->data_ptr = &params;
+ msg->data_size = sizeof(params);
ret = sof_ipc4_set_get_kcontrol_data(scontrol, true, lock);
msg->data_ptr = NULL;
diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c
index b24a64377f68..e012b6e166ac 100644
--- a/sound/soc/sof/ipc4-topology.c
+++ b/sound/soc/sof/ipc4-topology.c
@@ -130,18 +130,18 @@ static const struct sof_topology_token comp_ext_tokens[] = {
static const struct sof_topology_token gain_tokens[] = {
{SOF_TKN_GAIN_RAMP_TYPE, SND_SOC_TPLG_TUPLE_TYPE_WORD,
- get_token_u32, offsetof(struct sof_ipc4_gain_data, curve_type)},
+ get_token_u32, offsetof(struct sof_ipc4_gain_params, curve_type)},
{SOF_TKN_GAIN_RAMP_DURATION,
SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
- offsetof(struct sof_ipc4_gain_data, curve_duration_l)},
+ offsetof(struct sof_ipc4_gain_params, curve_duration_l)},
{SOF_TKN_GAIN_VAL, SND_SOC_TPLG_TUPLE_TYPE_WORD,
- get_token_u32, offsetof(struct sof_ipc4_gain_data, init_val)},
+ get_token_u32, offsetof(struct sof_ipc4_gain_params, init_val)},
};
/* SRC */
static const struct sof_topology_token src_tokens[] = {
{SOF_TKN_SRC_RATE_OUT, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
- offsetof(struct sof_ipc4_src, sink_rate)},
+ offsetof(struct sof_ipc4_src_data, sink_rate)},
};
static const struct sof_token_info ipc4_token_list[SOF_TOKEN_COUNT] = {
@@ -656,6 +656,7 @@ static int sof_ipc4_widget_setup_comp_pipeline(struct snd_sof_widget *swidget)
{
struct snd_soc_component *scomp = swidget->scomp;
struct sof_ipc4_pipeline *pipeline;
+ struct snd_sof_pipeline *spipe = swidget->spipe;
int ret;
pipeline = kzalloc(sizeof(*pipeline), GFP_KERNEL);
@@ -670,6 +671,7 @@ static int sof_ipc4_widget_setup_comp_pipeline(struct snd_sof_widget *swidget)
}
swidget->core = pipeline->core_id;
+ spipe->core_mask |= BIT(pipeline->core_id);
if (pipeline->use_chain_dma) {
dev_dbg(scomp->dev, "Set up chain DMA for %s\n", swidget->widget->name);
@@ -718,15 +720,15 @@ static int sof_ipc4_widget_setup_comp_pga(struct snd_sof_widget *swidget)
swidget->private = gain;
- gain->data.channels = SOF_IPC4_GAIN_ALL_CHANNELS_MASK;
- gain->data.init_val = SOF_IPC4_VOL_ZERO_DB;
+ gain->data.params.channels = SOF_IPC4_GAIN_ALL_CHANNELS_MASK;
+ gain->data.params.init_val = SOF_IPC4_VOL_ZERO_DB;
- ret = sof_ipc4_get_audio_fmt(scomp, swidget, &gain->available_fmt, &gain->base_config);
+ ret = sof_ipc4_get_audio_fmt(scomp, swidget, &gain->available_fmt, &gain->data.base_config);
if (ret)
goto err;
- ret = sof_update_ipc_object(scomp, &gain->data, SOF_GAIN_TOKENS, swidget->tuples,
- swidget->num_tuples, sizeof(gain->data), 1);
+ ret = sof_update_ipc_object(scomp, &gain->data.params, SOF_GAIN_TOKENS,
+ swidget->tuples, swidget->num_tuples, sizeof(gain->data), 1);
if (ret) {
dev_err(scomp->dev, "Parsing gain tokens failed\n");
goto err;
@@ -734,8 +736,8 @@ static int sof_ipc4_widget_setup_comp_pga(struct snd_sof_widget *swidget)
dev_dbg(scomp->dev,
"pga widget %s: ramp type: %d, ramp duration %d, initial gain value: %#x\n",
- swidget->widget->name, gain->data.curve_type, gain->data.curve_duration_l,
- gain->data.init_val);
+ swidget->widget->name, gain->data.params.curve_type,
+ gain->data.params.curve_duration_l, gain->data.params.init_val);
ret = sof_ipc4_widget_setup_msg(swidget, &gain->msg);
if (ret)
@@ -797,6 +799,7 @@ err:
static int sof_ipc4_widget_setup_comp_src(struct snd_sof_widget *swidget)
{
struct snd_soc_component *scomp = swidget->scomp;
+ struct snd_sof_pipeline *spipe = swidget->spipe;
struct sof_ipc4_src *src;
int ret;
@@ -808,18 +811,21 @@ static int sof_ipc4_widget_setup_comp_src(struct snd_sof_widget *swidget)
swidget->private = src;
- ret = sof_ipc4_get_audio_fmt(scomp, swidget, &src->available_fmt, &src->base_config);
+ ret = sof_ipc4_get_audio_fmt(scomp, swidget, &src->available_fmt,
+ &src->data.base_config);
if (ret)
goto err;
- ret = sof_update_ipc_object(scomp, src, SOF_SRC_TOKENS, swidget->tuples,
+ ret = sof_update_ipc_object(scomp, &src->data, SOF_SRC_TOKENS, swidget->tuples,
swidget->num_tuples, sizeof(*src), 1);
if (ret) {
dev_err(scomp->dev, "Parsing SRC tokens failed\n");
goto err;
}
- dev_dbg(scomp->dev, "SRC sink rate %d\n", src->sink_rate);
+ spipe->core_mask |= BIT(swidget->core);
+
+ dev_dbg(scomp->dev, "SRC sink rate %d\n", src->data.sink_rate);
ret = sof_ipc4_widget_setup_msg(swidget, &src->msg);
if (ret)
@@ -864,6 +870,7 @@ static int sof_ipc4_widget_setup_comp_process(struct snd_sof_widget *swidget)
{
struct snd_soc_component *scomp = swidget->scomp;
struct sof_ipc4_fw_module *fw_module;
+ struct snd_sof_pipeline *spipe = swidget->spipe;
struct sof_ipc4_process *process;
void *cfg;
int ret;
@@ -920,6 +927,9 @@ static int sof_ipc4_widget_setup_comp_process(struct snd_sof_widget *swidget)
sof_ipc4_widget_update_kcontrol_module_id(swidget);
+ /* set pipeline core mask to keep track of the core the module is scheduled to run on */
+ spipe->core_mask |= BIT(swidget->core);
+
return 0;
free_base_cfg_ext:
kfree(process->base_config_ext);
@@ -1816,7 +1826,7 @@ static int sof_ipc4_prepare_gain_module(struct snd_sof_widget *swidget,
u32 out_ref_rate, out_ref_channels, out_ref_valid_bits;
int ret;
- ret = sof_ipc4_init_input_audio_fmt(sdev, swidget, &gain->base_config,
+ ret = sof_ipc4_init_input_audio_fmt(sdev, swidget, &gain->data.base_config,
pipeline_params, available_fmt);
if (ret < 0)
return ret;
@@ -1826,7 +1836,7 @@ static int sof_ipc4_prepare_gain_module(struct snd_sof_widget *swidget,
out_ref_channels = SOF_IPC4_AUDIO_FORMAT_CFG_CHANNELS_COUNT(in_fmt->fmt_cfg);
out_ref_valid_bits = SOF_IPC4_AUDIO_FORMAT_CFG_V_BIT_DEPTH(in_fmt->fmt_cfg);
- ret = sof_ipc4_init_output_audio_fmt(sdev, &gain->base_config, available_fmt,
+ ret = sof_ipc4_init_output_audio_fmt(sdev, &gain->data.base_config, available_fmt,
out_ref_rate, out_ref_channels, out_ref_valid_bits);
if (ret < 0) {
dev_err(sdev->dev, "Failed to initialize output format for %s",
@@ -1835,7 +1845,7 @@ static int sof_ipc4_prepare_gain_module(struct snd_sof_widget *swidget,
}
/* update pipeline memory usage */
- sof_ipc4_update_resource_usage(sdev, swidget, &gain->base_config);
+ sof_ipc4_update_resource_usage(sdev, swidget, &gain->data.base_config);
return 0;
}
@@ -1891,7 +1901,7 @@ static int sof_ipc4_prepare_src_module(struct snd_sof_widget *swidget,
u32 out_ref_rate, out_ref_channels, out_ref_valid_bits;
int output_format_index, input_format_index;
- input_format_index = sof_ipc4_init_input_audio_fmt(sdev, swidget, &src->base_config,
+ input_format_index = sof_ipc4_init_input_audio_fmt(sdev, swidget, &src->data.base_config,
pipeline_params, available_fmt);
if (input_format_index < 0)
return input_format_index;
@@ -1921,7 +1931,7 @@ static int sof_ipc4_prepare_src_module(struct snd_sof_widget *swidget,
*/
out_ref_rate = params_rate(fe_params);
- output_format_index = sof_ipc4_init_output_audio_fmt(sdev, &src->base_config,
+ output_format_index = sof_ipc4_init_output_audio_fmt(sdev, &src->data.base_config,
available_fmt, out_ref_rate,
out_ref_channels, out_ref_valid_bits);
if (output_format_index < 0) {
@@ -1931,10 +1941,10 @@ static int sof_ipc4_prepare_src_module(struct snd_sof_widget *swidget,
}
/* update pipeline memory usage */
- sof_ipc4_update_resource_usage(sdev, swidget, &src->base_config);
+ sof_ipc4_update_resource_usage(sdev, swidget, &src->data.base_config);
out_audio_fmt = &available_fmt->output_pin_fmts[output_format_index].audio_fmt;
- src->sink_rate = out_audio_fmt->sampling_frequency;
+ src->data.sink_rate = out_audio_fmt->sampling_frequency;
/* update pipeline_params for sink widgets */
return sof_ipc4_update_hw_params(sdev, pipeline_params, out_audio_fmt);
@@ -2314,9 +2324,8 @@ static int sof_ipc4_widget_setup(struct snd_sof_dev *sdev, struct snd_sof_widget
{
struct sof_ipc4_gain *gain = swidget->private;
- ipc_size = sizeof(struct sof_ipc4_base_module_cfg) +
- sizeof(struct sof_ipc4_gain_data);
- ipc_data = gain;
+ ipc_size = sizeof(gain->data);
+ ipc_data = &gain->data;
msg = &gain->msg;
break;
@@ -2335,8 +2344,8 @@ static int sof_ipc4_widget_setup(struct snd_sof_dev *sdev, struct snd_sof_widget
{
struct sof_ipc4_src *src = swidget->private;
- ipc_size = sizeof(struct sof_ipc4_base_module_cfg) + sizeof(src->sink_rate);
- ipc_data = src;
+ ipc_size = sizeof(src->data);
+ ipc_data = &src->data;
msg = &src->msg;
break;
diff --git a/sound/soc/sof/ipc4-topology.h b/sound/soc/sof/ipc4-topology.h
index 0a57b8ab3e08..dce174a190dd 100644
--- a/sound/soc/sof/ipc4-topology.h
+++ b/sound/soc/sof/ipc4-topology.h
@@ -361,7 +361,7 @@ struct sof_ipc4_control_msg_payload {
} __packed;
/**
- * struct sof_ipc4_gain_data - IPC gain blob
+ * struct sof_ipc4_gain_params - IPC gain parameters
* @channels: Channels
* @init_val: Initial value
* @curve_type: Curve type
@@ -369,24 +369,32 @@ struct sof_ipc4_control_msg_payload {
* @curve_duration_l: Curve duration low part
* @curve_duration_h: Curve duration high part
*/
-struct sof_ipc4_gain_data {
+struct sof_ipc4_gain_params {
uint32_t channels;
uint32_t init_val;
uint32_t curve_type;
uint32_t reserved;
uint32_t curve_duration_l;
uint32_t curve_duration_h;
-} __aligned(8);
+} __packed __aligned(4);
/**
- * struct sof_ipc4_gain - gain config data
+ * struct sof_ipc4_gain_data - IPC gain init blob
* @base_config: IPC base config data
+ * @params: Initial parameters for the gain module
+ */
+struct sof_ipc4_gain_data {
+ struct sof_ipc4_base_module_cfg base_config;
+ struct sof_ipc4_gain_params params;
+} __packed __aligned(4);
+
+/**
+ * struct sof_ipc4_gain - gain config data
* @data: IPC gain blob
* @available_fmt: Available audio format
* @msg: message structure for gain
*/
struct sof_ipc4_gain {
- struct sof_ipc4_base_module_cfg base_config;
struct sof_ipc4_gain_data data;
struct sof_ipc4_available_audio_format available_fmt;
struct sof_ipc4_msg msg;
@@ -404,16 +412,24 @@ struct sof_ipc4_mixer {
struct sof_ipc4_msg msg;
};
-/**
- * struct sof_ipc4_src SRC config data
+/**
+ * struct sof_ipc4_src_data - IPC data for SRC
* @base_config: IPC base config data
* @sink_rate: Output rate for sink module
+ */
+struct sof_ipc4_src_data {
+ struct sof_ipc4_base_module_cfg base_config;
+ uint32_t sink_rate;
+} __packed __aligned(4);
+
+/**
+ * struct sof_ipc4_src - SRC config data
+ * @data: SRC IPC config data (base config and sink rate)
* @available_fmt: Available audio format
* @msg: IPC4 message struct containing header and data info
*/
struct sof_ipc4_src {
- struct sof_ipc4_base_module_cfg base_config;
- uint32_t sink_rate;
+ struct sof_ipc4_src_data data;
struct sof_ipc4_available_audio_format available_fmt;
struct sof_ipc4_msg msg;
};
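The point of the regrouping above is that base_config and the module-specific fields now live in one __packed, 4-byte-aligned struct, so sof_ipc4_widget_setup() can send sizeof(gain->data) / sizeof(src->data) as the IPC payload with no padding in between. A minimal compile-time sketch of that invariant, assuming the definitions above (illustrative only, not part of the patch):

	#include <stddef.h>

	/* the params must start exactly where the base config ends */
	_Static_assert(offsetof(struct sof_ipc4_gain_data, params) ==
		       sizeof(struct sof_ipc4_base_module_cfg),
		       "gain IPC blob must be contiguous");
	_Static_assert(sizeof(struct sof_ipc4_src_data) ==
		       sizeof(struct sof_ipc4_base_module_cfg) + sizeof(uint32_t),
		       "src IPC blob must be contiguous");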
diff --git a/sound/soc/sof/mediatek/mt8186/mt8186.c b/sound/soc/sof/mediatek/mt8186/mt8186.c
index b69fa788b16f..e0d88e7aa8ca 100644
--- a/sound/soc/sof/mediatek/mt8186/mt8186.c
+++ b/sound/soc/sof/mediatek/mt8186/mt8186.c
@@ -597,6 +597,9 @@ static struct snd_sof_dsp_ops sof_mt8186_ops = {
static struct snd_sof_of_mach sof_mt8186_machs[] = {
{
+ .compatible = "google,steelix",
+ .sof_tplg_filename = "sof-mt8186-google-steelix.tplg"
+ }, {
.compatible = "mediatek,mt8186",
.sof_tplg_filename = "sof-mt8186.tplg",
},
diff --git a/sound/soc/sof/sof-audio.c b/sound/soc/sof/sof-audio.c
index 563fe6f7789f..77cc64ac7113 100644
--- a/sound/soc/sof/sof-audio.c
+++ b/sound/soc/sof/sof-audio.c
@@ -46,6 +46,7 @@ static int sof_widget_free_unlocked(struct snd_sof_dev *sdev,
struct snd_sof_widget *swidget)
{
const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+ struct snd_sof_pipeline *spipe = swidget->spipe;
struct snd_sof_widget *pipe_widget;
int err = 0;
int ret;
@@ -87,15 +88,22 @@ static int sof_widget_free_unlocked(struct snd_sof_dev *sdev,
}
/*
- * disable widget core. continue to route setup status and complete flag
- * even if this fails and return the appropriate error
+ * decrement ref count for cores associated with all modules in the pipeline and clear
+ * the complete flag
*/
- ret = snd_sof_dsp_core_put(sdev, swidget->core);
- if (ret < 0) {
- dev_err(sdev->dev, "error: failed to disable target core: %d for widget %s\n",
- swidget->core, swidget->widget->name);
- if (!err)
- err = ret;
+ if (swidget->id == snd_soc_dapm_scheduler) {
+ int i;
+
+ for_each_set_bit(i, &spipe->core_mask, sdev->num_cores) {
+ ret = snd_sof_dsp_core_put(sdev, i);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to disable target core: %d for pipeline %s\n",
+ i, swidget->widget->name);
+ if (!err)
+ err = ret;
+ }
+ }
+ swidget->spipe->complete = 0;
}
/*
@@ -108,10 +116,6 @@ static int sof_widget_free_unlocked(struct snd_sof_dev *sdev,
err = ret;
}
- /* clear pipeline complete */
- if (swidget->id == snd_soc_dapm_scheduler)
- swidget->spipe->complete = 0;
-
if (!err)
dev_dbg(sdev->dev, "widget %s freed\n", swidget->widget->name);
@@ -134,8 +138,10 @@ static int sof_widget_setup_unlocked(struct snd_sof_dev *sdev,
struct snd_sof_widget *swidget)
{
const struct sof_ipc_tplg_ops *tplg_ops = sof_ipc_get_ops(sdev, tplg);
+ struct snd_sof_pipeline *spipe = swidget->spipe;
bool use_count_decremented = false;
int ret;
+ int i;
/* skip if there is no private data */
if (!swidget->private)
@@ -166,19 +172,23 @@ static int sof_widget_setup_unlocked(struct snd_sof_dev *sdev,
goto use_count_dec;
}
- /* enable widget core */
- ret = snd_sof_dsp_core_get(sdev, swidget->core);
- if (ret < 0) {
- dev_err(sdev->dev, "error: failed to enable target core for widget %s\n",
- swidget->widget->name);
- goto pipe_widget_free;
+ /* update ref count for cores associated with all modules in the pipeline */
+ if (swidget->id == snd_soc_dapm_scheduler) {
+ for_each_set_bit(i, &spipe->core_mask, sdev->num_cores) {
+ ret = snd_sof_dsp_core_get(sdev, i);
+ if (ret < 0) {
+ dev_err(sdev->dev, "failed to enable target core %d for pipeline %s\n",
+ i, swidget->widget->name);
+ goto pipe_widget_free;
+ }
+ }
}
/* setup widget in the DSP */
if (tplg_ops && tplg_ops->widget_setup) {
ret = tplg_ops->widget_setup(sdev, swidget);
if (ret < 0)
- goto core_put;
+ goto pipe_widget_free;
}
/* send config for DAI components */
@@ -208,15 +218,22 @@ static int sof_widget_setup_unlocked(struct snd_sof_dev *sdev,
return 0;
widget_free:
- /* widget use_count and core ref_count will both be decremented by sof_widget_free() */
+ /* widget use_count will be decremented by sof_widget_free() */
sof_widget_free_unlocked(sdev, swidget);
use_count_decremented = true;
-core_put:
- if (!use_count_decremented)
- snd_sof_dsp_core_put(sdev, swidget->core);
pipe_widget_free:
- if (swidget->id != snd_soc_dapm_scheduler)
+ if (swidget->id != snd_soc_dapm_scheduler) {
sof_widget_free_unlocked(sdev, swidget->spipe->pipe_widget);
+ } else {
+ int j;
+
+ /* decrement ref count for all cores that were updated previously */
+ for_each_set_bit(j, &spipe->core_mask, sdev->num_cores) {
+ if (j >= i)
+ break;
+ snd_sof_dsp_core_put(sdev, j);
+ }
+ }
use_count_dec:
if (!use_count_decremented)
swidget->use_count--;
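Note how the error path above unwinds only partially: `i` still holds the bit at which snd_sof_dsp_core_get() failed, and the `j >= i` check stops the put loop there, so only cores that were actually enabled get released. The same pattern in isolation, a kernel-style sketch with hypothetical enable_core()/disable_core() helpers:

	static int enable_cores(unsigned long mask, int num_cores)
	{
		int i, j, ret;

		for_each_set_bit(i, &mask, num_cores) {
			ret = enable_core(i);	/* hypothetical helper */
			if (ret < 0)
				goto unwind;
		}
		return 0;

	unwind:
		/* release only cores enabled before the failure at bit i */
		for_each_set_bit(j, &mask, num_cores) {
			if (j >= i)
				break;
			disable_core(j);	/* hypothetical helper */
		}
		return ret;
	}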
diff --git a/sound/soc/sof/sof-audio.h b/sound/soc/sof/sof-audio.h
index 5d5eeb1a1a6f..a6d6bcd00cee 100644
--- a/sound/soc/sof/sof-audio.h
+++ b/sound/soc/sof/sof-audio.h
@@ -480,6 +480,7 @@ struct snd_sof_widget {
* @paused_count: Count of number of PCM's that have started and have currently paused this
pipeline
* @complete: flag used to indicate that pipeline set up is complete.
+ * @core_mask: Mask containing target cores for all modules in the pipeline
* @list: List item in sdev pipeline_list
*/
struct snd_sof_pipeline {
@@ -487,6 +488,7 @@ struct snd_sof_pipeline {
int started_count;
int paused_count;
int complete;
+ unsigned long core_mask;
struct list_head list;
};
diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
index a3a3af252259..37ec671a2d76 100644
--- a/sound/soc/sof/topology.c
+++ b/sound/soc/sof/topology.c
@@ -1736,8 +1736,10 @@ static int sof_dai_load(struct snd_soc_component *scomp, int index,
/* perform pcm set op */
if (ipc_pcm_ops && ipc_pcm_ops->pcm_setup) {
ret = ipc_pcm_ops->pcm_setup(sdev, spcm);
- if (ret < 0)
+ if (ret < 0) {
+ kfree(spcm);
return ret;
+ }
}
dai_drv->dobj.private = spcm;
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 898bc3baca7b..c8d48566e175 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -2978,6 +2978,7 @@ static int snd_bbfpro_controls_create(struct usb_mixer_interface *mixer)
#define SND_DJM_850_IDX 0x2
#define SND_DJM_900NXS2_IDX 0x3
#define SND_DJM_750MK2_IDX 0x4
+#define SND_DJM_450_IDX 0x5
#define SND_DJM_CTL(_name, suffix, _default_value, _windex) { \
@@ -3108,6 +3109,31 @@ static const struct snd_djm_ctl snd_djm_ctls_250mk2[] = {
};
+// DJM-450
+static const u16 snd_djm_opts_450_cap1[] = {
+ 0x0103, 0x0100, 0x0106, 0x0107, 0x0108, 0x0109, 0x010d, 0x010a };
+
+static const u16 snd_djm_opts_450_cap2[] = {
+ 0x0203, 0x0200, 0x0206, 0x0207, 0x0208, 0x0209, 0x020d, 0x020a };
+
+static const u16 snd_djm_opts_450_cap3[] = {
+ 0x030a, 0x0311, 0x0312, 0x0307, 0x0308, 0x0309, 0x030d };
+
+static const u16 snd_djm_opts_450_pb1[] = { 0x0100, 0x0101, 0x0104 };
+static const u16 snd_djm_opts_450_pb2[] = { 0x0200, 0x0201, 0x0204 };
+static const u16 snd_djm_opts_450_pb3[] = { 0x0300, 0x0301, 0x0304 };
+
+static const struct snd_djm_ctl snd_djm_ctls_450[] = {
+ SND_DJM_CTL("Capture Level", cap_level, 0, SND_DJM_WINDEX_CAPLVL),
+ SND_DJM_CTL("Ch1 Input", 450_cap1, 2, SND_DJM_WINDEX_CAP),
+ SND_DJM_CTL("Ch2 Input", 450_cap2, 2, SND_DJM_WINDEX_CAP),
+ SND_DJM_CTL("Ch3 Input", 450_cap3, 0, SND_DJM_WINDEX_CAP),
+ SND_DJM_CTL("Ch1 Output", 450_pb1, 0, SND_DJM_WINDEX_PB),
+ SND_DJM_CTL("Ch2 Output", 450_pb2, 1, SND_DJM_WINDEX_PB),
+ SND_DJM_CTL("Ch3 Output", 450_pb3, 2, SND_DJM_WINDEX_PB)
+};
+
+
// DJM-750
static const u16 snd_djm_opts_750_cap1[] = {
0x0101, 0x0103, 0x0106, 0x0107, 0x0108, 0x0109, 0x010a, 0x010f };
@@ -3203,6 +3229,7 @@ static const struct snd_djm_device snd_djm_devices[] = {
[SND_DJM_850_IDX] = SND_DJM_DEVICE(850),
[SND_DJM_900NXS2_IDX] = SND_DJM_DEVICE(900nxs2),
[SND_DJM_750MK2_IDX] = SND_DJM_DEVICE(750mk2),
+ [SND_DJM_450_IDX] = SND_DJM_DEVICE(450),
};
@@ -3454,6 +3481,9 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
case USB_ID(0x2b73, 0x0017): /* Pioneer DJ DJM-250MK2 */
err = snd_djm_controls_create(mixer, SND_DJM_250MK2_IDX);
break;
+ case USB_ID(0x2b73, 0x0013): /* Pioneer DJ DJM-450 */
+ err = snd_djm_controls_create(mixer, SND_DJM_450_IDX);
+ break;
case USB_ID(0x08e4, 0x017f): /* Pioneer DJ DJM-750 */
err = snd_djm_controls_create(mixer, SND_DJM_750_IDX);
break;
diff --git a/tools/arch/arm64/include/asm/cputype.h b/tools/arch/arm64/include/asm/cputype.h
index 5f6f84837a49..7c7493cb571f 100644
--- a/tools/arch/arm64/include/asm/cputype.h
+++ b/tools/arch/arm64/include/asm/cputype.h
@@ -79,13 +79,15 @@
#define ARM_CPU_PART_CORTEX_A78AE 0xD42
#define ARM_CPU_PART_CORTEX_X1 0xD44
#define ARM_CPU_PART_CORTEX_A510 0xD46
+#define ARM_CPU_PART_CORTEX_A520 0xD80
#define ARM_CPU_PART_CORTEX_A710 0xD47
#define ARM_CPU_PART_CORTEX_A715 0xD4D
#define ARM_CPU_PART_CORTEX_X2 0xD48
#define ARM_CPU_PART_NEOVERSE_N2 0xD49
#define ARM_CPU_PART_CORTEX_A78C 0xD4B
-#define APM_CPU_PART_POTENZA 0x000
+#define APM_CPU_PART_XGENE 0x000
+#define APM_CPU_VAR_POTENZA 0x00
#define CAVIUM_CPU_PART_THUNDERX 0x0A1
#define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2
@@ -148,6 +150,7 @@
#define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
+#define MIDR_CORTEX_A520 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A520)
#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
#define MIDR_CORTEX_A715 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A715)
#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index f7ddd73a8c0f..89d2fc872d9f 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -505,6 +505,38 @@ struct kvm_smccc_filter {
#define KVM_HYPERCALL_EXIT_SMC (1U << 0)
#define KVM_HYPERCALL_EXIT_16BIT (1U << 1)
+/*
+ * Get feature ID registers userspace writable mask.
+ *
+ * From DDI0487J.a, D19.2.66 ("ID_AA64MMFR2_EL1, AArch64 Memory Model
+ * Feature Register 2"):
+ *
+ * "The Feature ID space is defined as the System register space in
+ * AArch64 with op0==3, op1=={0, 1, 3}, CRn==0, CRm=={0-7},
+ * op2=={0-7}."
+ *
+ * This covers all currently known R/O registers that indicate
+ * anything useful feature wise, including the ID registers.
+ *
+ * If we ever need to introduce a new range, it will be described as
+ * such in the range field.
+ */
+#define KVM_ARM_FEATURE_ID_RANGE_IDX(op0, op1, crn, crm, op2) \
+ ({ \
+ __u64 __op1 = (op1) & 3; \
+ __op1 -= (__op1 == 3); \
+ (__op1 << 6 | ((crm) & 7) << 3 | (op2)); \
+ })
+
+#define KVM_ARM_FEATURE_ID_RANGE 0
+#define KVM_ARM_FEATURE_ID_RANGE_SIZE (3 * 8 * 8)
+
+struct reg_mask_range {
+ __u64 addr; /* Pointer to mask array */
+ __u32 range; /* Requested range */
+ __u32 reserved[13];
+};
+
#endif
#endif /* __ARM_KVM_H__ */
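A worked example of the encoding above: for ID_AA64PFR0_EL1 (op0==3, op1==0, CRn==0, CRm==4, op2==0) the macro yields (0 << 6) | (4 << 3) | 0 = 32, and op1==3 is folded down to 2, which is why 3 * 8 * 8 slots cover all three op1 values. A standalone sketch of the same computation:

	/* mirrors KVM_ARM_FEATURE_ID_RANGE_IDX above; illustrative only */
	static unsigned int feature_id_idx(unsigned int op1, unsigned int crm,
					   unsigned int op2)
	{
		unsigned int o = op1 & 3;

		o -= (o == 3);		/* op1 {0,1,3} -> {0,1,2} */
		return (o << 6) | ((crm & 7) << 3) | op2;
	}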
diff --git a/tools/arch/arm64/include/uapi/asm/perf_regs.h b/tools/arch/arm64/include/uapi/asm/perf_regs.h
index fd157f46727e..86e556429e0e 100644
--- a/tools/arch/arm64/include/uapi/asm/perf_regs.h
+++ b/tools/arch/arm64/include/uapi/asm/perf_regs.h
@@ -36,11 +36,13 @@ enum perf_event_arm_regs {
PERF_REG_ARM64_LR,
PERF_REG_ARM64_SP,
PERF_REG_ARM64_PC,
+ PERF_REG_ARM64_MAX,
/* Extended/pseudo registers */
- PERF_REG_ARM64_VG = 46, // SVE Vector Granule
-
- PERF_REG_ARM64_MAX = PERF_REG_ARM64_PC + 1,
- PERF_REG_ARM64_EXTENDED_MAX = PERF_REG_ARM64_VG + 1
+ PERF_REG_ARM64_VG = 46, /* SVE Vector Granule */
+ PERF_REG_ARM64_EXTENDED_MAX
};
+
+#define PERF_REG_EXTENDED_MASK (1ULL << PERF_REG_ARM64_VG)
+
#endif /* _ASM_ARM64_PERF_REGS_H */
diff --git a/tools/arch/arm64/tools/Makefile b/tools/arch/arm64/tools/Makefile
index 7f64b8bb5107..7b42feedf647 100644
--- a/tools/arch/arm64/tools/Makefile
+++ b/tools/arch/arm64/tools/Makefile
@@ -22,7 +22,7 @@ endif
arm64_tools_dir = $(top_srcdir)/arch/arm64/tools
arm64_sysreg_tbl = $(arm64_tools_dir)/sysreg
arm64_gen_sysreg = $(arm64_tools_dir)/gen-sysreg.awk
-arm64_generated_dir = $(top_srcdir)/tools/arch/arm64/include/generated
+arm64_generated_dir = $(OUTPUT)arch/arm64/include/generated
arm64_sysreg_defs = $(arm64_generated_dir)/asm/sysreg-defs.h
all: $(arm64_sysreg_defs)
diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h
index a73cf01a1606..abe926d43cbe 100644
--- a/tools/arch/s390/include/uapi/asm/kvm.h
+++ b/tools/arch/s390/include/uapi/asm/kvm.h
@@ -159,6 +159,22 @@ struct kvm_s390_vm_cpu_subfunc {
__u8 reserved[1728];
};
+#define KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST 6
+#define KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST 7
+
+#define KVM_S390_VM_CPU_UV_FEAT_NR_BITS 64
+struct kvm_s390_vm_cpu_uv_feat {
+ union {
+ struct {
+ __u64 : 4;
+ __u64 ap : 1; /* bit 4 */
+ __u64 ap_intr : 1; /* bit 5 */
+ __u64 : 58;
+ };
+ __u64 feat;
+ };
+};
+
/* kvm attributes for crypto */
#define KVM_S390_VM_CRYPTO_ENABLE_AES_KW 0
#define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW 1
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 798e60b5454b..4af140cf5719 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -198,7 +198,6 @@
#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */
#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
-#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_XCOMPACTED ( 7*32+10) /* "" Use compacted XSTATE (XSAVES or XSAVEC) */
@@ -308,6 +307,11 @@
#define X86_FEATURE_MSR_TSX_CTRL (11*32+20) /* "" MSR IA32_TSX_CTRL (Intel) implemented */
#define X86_FEATURE_SMBA (11*32+21) /* "" Slow Memory Bandwidth Allocation */
#define X86_FEATURE_BMEC (11*32+22) /* "" Bandwidth Monitoring Event Configuration */
+#define X86_FEATURE_USER_SHSTK (11*32+23) /* Shadow stack support for user mode applications */
+
+#define X86_FEATURE_SRSO (11*32+24) /* "" AMD BTB untrain RETs */
+#define X86_FEATURE_SRSO_ALIAS (11*32+25) /* "" AMD BTB untrain RETs through aliasing */
+#define X86_FEATURE_IBPB_ON_VMEXIT (11*32+26) /* "" Issue an IBPB only on VMEXIT */
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
@@ -380,6 +384,7 @@
#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
#define X86_FEATURE_WAITPKG (16*32+ 5) /* UMONITOR/UMWAIT/TPAUSE Instructions */
#define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */
+#define X86_FEATURE_SHSTK (16*32+ 7) /* "" Shadow stack */
#define X86_FEATURE_GFNI (16*32+ 8) /* Galois Field New Instructions */
#define X86_FEATURE_VAES (16*32+ 9) /* Vector AES */
#define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */
@@ -438,11 +443,16 @@
/* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
#define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* "" No Nested Data Breakpoints */
+#define X86_FEATURE_WRMSR_XX_BASE_NS (20*32+ 1) /* "" WRMSR to {FS,GS,KERNEL_GS}_BASE is non-serializing */
#define X86_FEATURE_LFENCE_RDTSC (20*32+ 2) /* "" LFENCE always serializing / synchronizes RDTSC */
#define X86_FEATURE_NULL_SEL_CLR_BASE (20*32+ 6) /* "" Null Selector Clears Base */
#define X86_FEATURE_AUTOIBRS (20*32+ 8) /* "" Automatic IBRS */
#define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* "" SMM_CTL MSR is not present */
+#define X86_FEATURE_SBPB (20*32+27) /* "" Selective Branch Prediction Barrier */
+#define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
+#define X86_FEATURE_SRSO_NO (20*32+29) /* "" CPU is not affected by SRSO */
+
/*
* BUG word(s)
*/
@@ -484,5 +494,9 @@
#define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */
#define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
#define X86_BUG_SMT_RSB X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */
+#define X86_BUG_GDS X86_BUG(30) /* CPU is affected by Gather Data Sampling */
+/* BUG word 2 */
+#define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */
+#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
#endif /* _ASM_X86_CPUFEATURES_H */
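Each constant above packs (word * 32 + bit) into one number: X86_FEATURE_SRSO at (11*32+24) is bit 24 of capability word 11, flag 376 overall. A trivial decode sketch (not kernel code):

	#define FEATURE_WORD(f)	((f) / 32)
	#define FEATURE_BIT(f)	((f) % 32)

	_Static_assert(FEATURE_WORD(11*32 + 24) == 11, "SRSO is in word 11");
	_Static_assert(FEATURE_BIT(11*32 + 24) == 24, "SRSO is bit 24");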
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index fafe9be7a6f4..702d93fdd10e 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -105,6 +105,18 @@
# define DISABLE_TDX_GUEST (1 << (X86_FEATURE_TDX_GUEST & 31))
#endif
+#ifdef CONFIG_X86_USER_SHADOW_STACK
+#define DISABLE_USER_SHSTK 0
+#else
+#define DISABLE_USER_SHSTK (1 << (X86_FEATURE_USER_SHSTK & 31))
+#endif
+
+#ifdef CONFIG_X86_KERNEL_IBT
+#define DISABLE_IBT 0
+#else
+#define DISABLE_IBT (1 << (X86_FEATURE_IBT & 31))
+#endif
+
/*
* Make sure to add features to the correct mask
*/
@@ -120,7 +132,7 @@
#define DISABLED_MASK9 (DISABLE_SGX)
#define DISABLED_MASK10 0
#define DISABLED_MASK11 (DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET| \
- DISABLE_CALL_DEPTH_TRACKING)
+ DISABLE_CALL_DEPTH_TRACKING|DISABLE_USER_SHSTK)
#define DISABLED_MASK12 (DISABLE_LAM)
#define DISABLED_MASK13 0
#define DISABLED_MASK14 0
@@ -128,7 +140,7 @@
#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP| \
DISABLE_ENQCMD)
#define DISABLED_MASK17 0
-#define DISABLED_MASK18 0
+#define DISABLED_MASK18 (DISABLE_IBT)
#define DISABLED_MASK19 0
#define DISABLED_MASK20 0
#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
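The DISABLE_* values are per-word bit masks, which is why each is built as (1 << (FEATURE & 31)) and must be OR-ed into the DISABLED_MASKn of its own word: X86_FEATURE_USER_SHSTK is word 11, bit 23, so it lands in DISABLED_MASK11, while X86_FEATURE_IBT is word 18, hence DISABLED_MASK18. A compile-time sketch of that invariant, assuming the headers above are included:

	/* when the config option is off, the disable bit must match the feature bit */
	_Static_assert(DISABLE_USER_SHSTK == 0 ||
		       DISABLE_USER_SHSTK == (1 << (X86_FEATURE_USER_SHSTK & 31)),
		       "USER_SHSTK disable mask belongs to word 11");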
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index 1d111350197f..1d51e1850ed0 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -222,6 +222,7 @@
#define MSR_INTEGRITY_CAPS_ARRAY_BIST BIT(MSR_INTEGRITY_CAPS_ARRAY_BIST_BIT)
#define MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT 4
#define MSR_INTEGRITY_CAPS_PERIODIC_BIST BIT(MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT)
+#define MSR_INTEGRITY_CAPS_SAF_GEN_MASK GENMASK_ULL(10, 9)
#define MSR_LBR_NHM_FROM 0x00000680
#define MSR_LBR_NHM_TO 0x000006c0
@@ -553,6 +554,7 @@
#define MSR_AMD64_CPUID_FN_1 0xc0011004
#define MSR_AMD64_LS_CFG 0xc0011020
#define MSR_AMD64_DC_CFG 0xc0011022
+#define MSR_AMD64_TW_CFG 0xc0011023
#define MSR_AMD64_DE_CFG 0xc0011029
#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1
@@ -637,12 +639,21 @@
/* AMD Last Branch Record MSRs */
#define MSR_AMD64_LBR_SELECT 0xc000010e
+/* Zen4 */
+#define MSR_ZEN4_BP_CFG 0xc001102e
+#define MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT 5
+
+/* Fam 19h MSRs */
+#define MSR_F19H_UMC_PERF_CTL 0xc0010800
+#define MSR_F19H_UMC_PERF_CTR 0xc0010801
+
+/* Zen 2 */
+#define MSR_ZEN2_SPECTRAL_CHICKEN 0xc00110e3
+#define MSR_ZEN2_SPECTRAL_CHICKEN_BIT BIT_ULL(1)
+
/* Fam 17h MSRs */
#define MSR_F17H_IRPERF 0xc00000e9
-#define MSR_ZEN2_SPECTRAL_CHICKEN 0xc00110e3
-#define MSR_ZEN2_SPECTRAL_CHICKEN_BIT BIT_ULL(1)
-
/* Fam 16h MSRs */
#define MSR_F16H_L2I_PERF_CTL 0xc0010230
#define MSR_F16H_L2I_PERF_CTR 0xc0010231
@@ -1112,12 +1123,16 @@
#define MSR_IA32_VMX_MISC_INTEL_PT (1ULL << 14)
#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29)
#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE 0x1F
-/* AMD-V MSRs */
+/* AMD-V MSRs */
#define MSR_VM_CR 0xc0010114
#define MSR_VM_IGNNE 0xc0010115
#define MSR_VM_HSAVE_PA 0xc0010117
+#define SVM_VM_CR_VALID_MASK 0x001fULL
+#define SVM_VM_CR_SVM_LOCK_MASK 0x0008ULL
+#define SVM_VM_CR_SVM_DIS_MASK 0x0010ULL
+
/* Hardware Feedback Interface */
#define MSR_IA32_HW_FEEDBACK_PTR 0x17d0
#define MSR_IA32_HW_FEEDBACK_CONFIG 0x17d1
diff --git a/tools/arch/x86/include/uapi/asm/prctl.h b/tools/arch/x86/include/uapi/asm/prctl.h
index e8d7ebbca1a4..384e2cc6ac19 100644
--- a/tools/arch/x86/include/uapi/asm/prctl.h
+++ b/tools/arch/x86/include/uapi/asm/prctl.h
@@ -23,9 +23,21 @@
#define ARCH_MAP_VDSO_32 0x2002
#define ARCH_MAP_VDSO_64 0x2003
+/* Don't use 0x3001-0x3004 because of old glibcs */
+
#define ARCH_GET_UNTAG_MASK 0x4001
#define ARCH_ENABLE_TAGGED_ADDR 0x4002
#define ARCH_GET_MAX_TAG_BITS 0x4003
#define ARCH_FORCE_TAGGED_SVA 0x4004
+#define ARCH_SHSTK_ENABLE 0x5001
+#define ARCH_SHSTK_DISABLE 0x5002
+#define ARCH_SHSTK_LOCK 0x5003
+#define ARCH_SHSTK_UNLOCK 0x5004
+#define ARCH_SHSTK_STATUS 0x5005
+
+/* ARCH_SHSTK_ features bits */
+#define ARCH_SHSTK_SHSTK (1ULL << 0)
+#define ARCH_SHSTK_WRSS (1ULL << 1)
+
#endif /* _ASM_X86_PRCTL_H */
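The new ARCH_SHSTK_* codes are driven through arch_prctl(2); glibc exposes no wrapper, so a raw syscall is assumed in this sketch (the STATUS op writing through a pointer follows the kernel's shadow-stack documentation):

	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned long features = 0;

		/* ARCH_SHSTK_STATUS writes the enabled feature bits */
		if (syscall(SYS_arch_prctl, 0x5005 /* ARCH_SHSTK_STATUS */,
			    &features) == 0)
			printf("shstk features: %#lx\n", features);
		/* ARCH_SHSTK_ENABLE takes the feature bits directly */
		if (syscall(SYS_arch_prctl, 0x5001 /* ARCH_SHSTK_ENABLE */,
			    1UL /* ARCH_SHSTK_SHSTK */))
			perror("ARCH_SHSTK_ENABLE");
		return 0;
	}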
diff --git a/tools/include/asm-generic/unaligned.h b/tools/include/asm-generic/unaligned.h
index 156743d399ae..2fd551915c20 100644
--- a/tools/include/asm-generic/unaligned.h
+++ b/tools/include/asm-generic/unaligned.h
@@ -8,6 +8,7 @@
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpacked"
+#pragma GCC diagnostic ignored "-Wattributes"
#define __get_unaligned_t(type, ptr) ({ \
const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
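(The hunk ends mid-macro; the body goes on to return __pptr->x.) The idiom reads through a single-member __packed struct so the compiler emits an unaligned-safe load, and the added -Wattributes pragma presumably silences toolchains that warn the packed attribute has no effect. A userspace sketch of the same trick:

	#include <stdint.h>

	#define get_unaligned32(p) ({						\
		const struct { uint32_t x; } __attribute__((packed)) *_p =	\
			(const void *)(p);					\
		_p->x;								\
	})

	int main(void)
	{
		unsigned char buf[8] = { 0, 0x78, 0x56, 0x34, 0x12 };

		/* unaligned read at offset 1; 0x12345678 on little-endian */
		return get_unaligned32(buf + 1) == 0x12345678 ? 0 : 1;
	}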
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index 76d946445391..756b013fb832 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -816,15 +816,21 @@ __SYSCALL(__NR_process_mrelease, sys_process_mrelease)
__SYSCALL(__NR_futex_waitv, sys_futex_waitv)
#define __NR_set_mempolicy_home_node 450
__SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
-
#define __NR_cachestat 451
__SYSCALL(__NR_cachestat, sys_cachestat)
-
#define __NR_fchmodat2 452
__SYSCALL(__NR_fchmodat2, sys_fchmodat2)
+#define __NR_map_shadow_stack 453
+__SYSCALL(__NR_map_shadow_stack, sys_map_shadow_stack)
+#define __NR_futex_wake 454
+__SYSCALL(__NR_futex_wake, sys_futex_wake)
+#define __NR_futex_wait 455
+__SYSCALL(__NR_futex_wait, sys_futex_wait)
+#define __NR_futex_requeue 456
+__SYSCALL(__NR_futex_requeue, sys_futex_requeue)
#undef __NR_syscalls
-#define __NR_syscalls 453
+#define __NR_syscalls 457
/*
* 32 bit systems traditionally used different
diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h
index 794c1d857677..de723566c5ae 100644
--- a/tools/include/uapi/drm/drm.h
+++ b/tools/include/uapi/drm/drm.h
@@ -1134,6 +1134,26 @@ extern "C" {
#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
+/**
+ * DRM_IOCTL_MODE_CREATE_DUMB - Create a new dumb buffer object.
+ *
+ * KMS dumb buffers provide a very primitive way to allocate a buffer object
+ * suitable for scanout and map it for software rendering. KMS dumb buffers are
+ * not suitable for hardware-accelerated rendering nor video decoding. KMS dumb
+ * buffers are not suitable to be displayed on any other device than the KMS
+ * device where they were allocated from. Also see
+ * :ref:`kms_dumb_buffer_objects`.
+ *
+ * The IOCTL argument is a struct drm_mode_create_dumb.
+ *
+ * User-space is expected to create a KMS dumb buffer via this IOCTL, then add
+ * it as a KMS framebuffer via &DRM_IOCTL_MODE_ADDFB and map it via
+ * &DRM_IOCTL_MODE_MAP_DUMB.
+ *
+ * &DRM_CAP_DUMB_BUFFER indicates whether this IOCTL is supported.
+ * &DRM_CAP_DUMB_PREFERRED_DEPTH and &DRM_CAP_DUMB_PREFER_SHADOW indicate
+ * driver preferences for dumb buffers.
+ */
#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb)
#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
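The documented sequence (CREATE_DUMB, then ADDFB, then MAP_DUMB plus mmap) looks roughly like the sketch below; error handling and the ADDFB step are elided, and the header and device paths are assumptions:

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <drm/drm.h>

	int main(void)
	{
		int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
		struct drm_mode_create_dumb creq = {
			.width = 640, .height = 480, .bpp = 32,
		};
		struct drm_mode_map_dumb mreq = { 0 };
		void *map;

		ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
		mreq.handle = creq.handle;
		ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
		/* the offset is only meaningful for mmap() on this device */
		map = mmap(NULL, creq.size, PROT_READ | PROT_WRITE,
			   MAP_SHARED, fd, mreq.offset);
		return map == MAP_FAILED;
	}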
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 7000e5910a1d..218edb0a96f8 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -38,13 +38,13 @@ extern "C" {
*/
/**
- * DOC: uevents generated by i915 on it's device node
+ * DOC: uevents generated by i915 on its device node
*
* I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
- * event from the gpu l3 cache. Additional information supplied is ROW,
+ * event from the GPU L3 cache. Additional information supplied is ROW,
* BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
- * track of these events and if a specific cache-line seems to have a
- * persistent error remap it with the l3 remapping tool supplied in
+ * track of these events, and if a specific cache-line seems to have a
+ * persistent error, remap it with the L3 remapping tool supplied in
* intel-gpu-tools. The value supplied with the event is always 1.
*
* I915_ERROR_UEVENT - Generated upon error detection, currently only via
diff --git a/tools/include/uapi/linux/fscrypt.h b/tools/include/uapi/linux/fscrypt.h
index fd1fb0d5389d..7a8f4c290187 100644
--- a/tools/include/uapi/linux/fscrypt.h
+++ b/tools/include/uapi/linux/fscrypt.h
@@ -71,7 +71,8 @@ struct fscrypt_policy_v2 {
__u8 contents_encryption_mode;
__u8 filenames_encryption_mode;
__u8 flags;
- __u8 __reserved[4];
+ __u8 log2_data_unit_size;
+ __u8 __reserved[3];
__u8 master_key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE];
};
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index f089ab290978..211b86de35ac 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -264,6 +264,7 @@ struct kvm_xen_exit {
#define KVM_EXIT_RISCV_SBI 35
#define KVM_EXIT_RISCV_CSR 36
#define KVM_EXIT_NOTIFY 37
+#define KVM_EXIT_LOONGARCH_IOCSR 38
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -336,6 +337,13 @@ struct kvm_run {
__u32 len;
__u8 is_write;
} mmio;
+ /* KVM_EXIT_LOONGARCH_IOCSR */
+ struct {
+ __u64 phys_addr;
+ __u8 data[8];
+ __u32 len;
+ __u8 is_write;
+ } iocsr_io;
/* KVM_EXIT_HYPERCALL */
struct {
__u64 nr;
@@ -1192,6 +1200,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_COUNTER_OFFSET 227
#define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228
#define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229
+#define KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES 230
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1362,6 +1371,7 @@ struct kvm_dirty_tlb {
#define KVM_REG_ARM64 0x6000000000000000ULL
#define KVM_REG_MIPS 0x7000000000000000ULL
#define KVM_REG_RISCV 0x8000000000000000ULL
+#define KVM_REG_LOONGARCH 0x9000000000000000ULL
#define KVM_REG_SIZE_SHIFT 52
#define KVM_REG_SIZE_MASK 0x00f0000000000000ULL
@@ -1418,9 +1428,16 @@ struct kvm_device_attr {
__u64 addr; /* userspace address of attr data */
};
-#define KVM_DEV_VFIO_GROUP 1
-#define KVM_DEV_VFIO_GROUP_ADD 1
-#define KVM_DEV_VFIO_GROUP_DEL 2
+#define KVM_DEV_VFIO_FILE 1
+
+#define KVM_DEV_VFIO_FILE_ADD 1
+#define KVM_DEV_VFIO_FILE_DEL 2
+
+/* KVM_DEV_VFIO_GROUP aliases are for compile time uapi compatibility */
+#define KVM_DEV_VFIO_GROUP KVM_DEV_VFIO_FILE
+
+#define KVM_DEV_VFIO_GROUP_ADD KVM_DEV_VFIO_FILE_ADD
+#define KVM_DEV_VFIO_GROUP_DEL KVM_DEV_VFIO_FILE_DEL
#define KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE 3
enum kvm_device_type {
@@ -1555,6 +1572,7 @@ struct kvm_s390_ucas_mapping {
#define KVM_ARM_MTE_COPY_TAGS _IOR(KVMIO, 0xb4, struct kvm_arm_copy_mte_tags)
/* Available with KVM_CAP_COUNTER_OFFSET */
#define KVM_ARM_SET_COUNTER_OFFSET _IOW(KVMIO, 0xb5, struct kvm_arm_counter_offset)
+#define KVM_ARM_GET_REG_WRITABLE_MASKS _IOR(KVMIO, 0xb6, struct reg_mask_range)
/* ioctl for vm fd */
#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device)
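A hedged sketch of the new KVM_ARM_GET_REG_WRITABLE_MASKS call, pairing with the reg_mask_range layout introduced earlier; the caller supplies a KVM_ARM_FEATURE_ID_RANGE_SIZE array of u64 masks, and reserved[] is cleared defensively:

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int get_writable_masks(int vm_fd,
				      uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE])
	{
		struct reg_mask_range range;

		memset(&range, 0, sizeof(range));
		range.addr = (uint64_t)(uintptr_t)masks;
		range.range = KVM_ARM_FEATURE_ID_RANGE;
		return ioctl(vm_fd, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
	}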
diff --git a/tools/include/uapi/linux/mount.h b/tools/include/uapi/linux/mount.h
index 8eb0d7b758d2..bb242fdcfe6b 100644
--- a/tools/include/uapi/linux/mount.h
+++ b/tools/include/uapi/linux/mount.h
@@ -100,8 +100,9 @@ enum fsconfig_command {
FSCONFIG_SET_PATH = 3, /* Set parameter, supplying an object by path */
FSCONFIG_SET_PATH_EMPTY = 4, /* Set parameter, supplying an object by (empty) path */
FSCONFIG_SET_FD = 5, /* Set parameter, supplying an object by fd */
- FSCONFIG_CMD_CREATE = 6, /* Invoke superblock creation */
+ FSCONFIG_CMD_CREATE = 6, /* Create new or reuse existing superblock */
FSCONFIG_CMD_RECONFIGURE = 7, /* Invoke superblock reconfiguration */
+ FSCONFIG_CMD_CREATE_EXCL = 8, /* Create new superblock, fail if reusing existing superblock */
};
/*
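FSCONFIG_CMD_CREATE_EXCL gives callers a way to demand a brand-new superblock rather than silently sharing an existing one. A sketch with the new mount API via raw syscalls (the filesystem type and source device are placeholders):

	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	#define FSCONFIG_SET_STRING		1
	#define FSCONFIG_CMD_CREATE_EXCL	8

	int main(void)
	{
		int fsfd = syscall(SYS_fsopen, "ext4", 0);

		syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING,
			"source", "/dev/sda1", 0);	/* placeholder device */
		/* expected to fail if the superblock already exists */
		if (syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE_EXCL,
			    NULL, NULL, 0))
			perror("FSCONFIG_CMD_CREATE_EXCL");
		return 0;
	}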
diff --git a/tools/include/uapi/linux/vhost.h b/tools/include/uapi/linux/vhost.h
index f5c48b61ab62..649560c685f1 100644
--- a/tools/include/uapi/linux/vhost.h
+++ b/tools/include/uapi/linux/vhost.h
@@ -219,4 +219,12 @@
*/
#define VHOST_VDPA_RESUME _IO(VHOST_VIRTIO, 0x7E)
+/* Get the group for the descriptor table including driver & device areas
+ * of a virtqueue: read index, write group in num.
+ * The virtqueue index is stored in the index field of vhost_vring_state.
+ * The group ID of the descriptor table for this specific virtqueue
+ * is returned via num field of vhost_vring_state.
+ */
+#define VHOST_VDPA_GET_VRING_DESC_GROUP _IOWR(VHOST_VIRTIO, 0x7F, \
+ struct vhost_vring_state)
#endif
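Per the comment above, the ioctl reads the virtqueue index and writes the group ID back through the same struct; a minimal sketch, assuming an open vhost-vdpa fd:

	#include <sys/ioctl.h>
	#include <linux/vhost.h>

	/* returns the descriptor-table group of virtqueue vq, or -1 */
	static int vring_desc_group(int vdpa_fd, unsigned int vq)
	{
		struct vhost_vring_state state = { .index = vq };

		if (ioctl(vdpa_fd, VHOST_VDPA_GET_VRING_DESC_GROUP, &state) < 0)
			return -1;
		return state.num;
	}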
diff --git a/tools/net/ynl/generated/devlink-user.c b/tools/net/ynl/generated/devlink-user.c
index c12ca87ca2bb..8e757e249dab 100644
--- a/tools/net/ynl/generated/devlink-user.c
+++ b/tools/net/ynl/generated/devlink-user.c
@@ -2399,6 +2399,7 @@ void devlink_port_set_req_free(struct devlink_port_set_req *req)
int devlink_port_set(struct ynl_sock *ys, struct devlink_port_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -2416,7 +2417,7 @@ int devlink_port_set(struct ynl_sock *ys, struct devlink_port_set_req *req)
if (req->_present.port_function)
devlink_dl_port_function_put(nlh, DEVLINK_ATTR_PORT_FUNCTION, &req->port_function);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -2537,6 +2538,7 @@ void devlink_port_del_req_free(struct devlink_port_del_req *req)
int devlink_port_del(struct ynl_sock *ys, struct devlink_port_del_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -2550,7 +2552,7 @@ int devlink_port_del(struct ynl_sock *ys, struct devlink_port_del_req *req)
if (req->_present.port_index)
mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -2568,6 +2570,7 @@ void devlink_port_split_req_free(struct devlink_port_split_req *req)
int devlink_port_split(struct ynl_sock *ys, struct devlink_port_split_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -2583,7 +2586,7 @@ int devlink_port_split(struct ynl_sock *ys, struct devlink_port_split_req *req)
if (req->_present.port_split_count)
mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_SPLIT_COUNT, req->port_split_count);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -2602,6 +2605,7 @@ void devlink_port_unsplit_req_free(struct devlink_port_unsplit_req *req)
int devlink_port_unsplit(struct ynl_sock *ys,
struct devlink_port_unsplit_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -2615,7 +2619,7 @@ int devlink_port_unsplit(struct ynl_sock *ys,
if (req->_present.port_index)
mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -2926,6 +2930,7 @@ void devlink_sb_pool_set_req_free(struct devlink_sb_pool_set_req *req)
int devlink_sb_pool_set(struct ynl_sock *ys,
struct devlink_sb_pool_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -2945,7 +2950,7 @@ int devlink_sb_pool_set(struct ynl_sock *ys,
if (req->_present.sb_pool_size)
mnl_attr_put_u32(nlh, DEVLINK_ATTR_SB_POOL_SIZE, req->sb_pool_size);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -3126,6 +3131,7 @@ devlink_sb_port_pool_set_req_free(struct devlink_sb_port_pool_set_req *req)
int devlink_sb_port_pool_set(struct ynl_sock *ys,
struct devlink_sb_port_pool_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -3145,7 +3151,7 @@ int devlink_sb_port_pool_set(struct ynl_sock *ys,
if (req->_present.sb_threshold)
mnl_attr_put_u32(nlh, DEVLINK_ATTR_SB_THRESHOLD, req->sb_threshold);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -3334,6 +3340,7 @@ devlink_sb_tc_pool_bind_set_req_free(struct devlink_sb_tc_pool_bind_set_req *req
int devlink_sb_tc_pool_bind_set(struct ynl_sock *ys,
struct devlink_sb_tc_pool_bind_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -3357,7 +3364,7 @@ int devlink_sb_tc_pool_bind_set(struct ynl_sock *ys,
if (req->_present.sb_threshold)
mnl_attr_put_u32(nlh, DEVLINK_ATTR_SB_THRESHOLD, req->sb_threshold);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -3376,6 +3383,7 @@ void devlink_sb_occ_snapshot_req_free(struct devlink_sb_occ_snapshot_req *req)
int devlink_sb_occ_snapshot(struct ynl_sock *ys,
struct devlink_sb_occ_snapshot_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -3389,7 +3397,7 @@ int devlink_sb_occ_snapshot(struct ynl_sock *ys,
if (req->_present.sb_index)
mnl_attr_put_u32(nlh, DEVLINK_ATTR_SB_INDEX, req->sb_index);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -3409,6 +3417,7 @@ devlink_sb_occ_max_clear_req_free(struct devlink_sb_occ_max_clear_req *req)
int devlink_sb_occ_max_clear(struct ynl_sock *ys,
struct devlink_sb_occ_max_clear_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -3422,7 +3431,7 @@ int devlink_sb_occ_max_clear(struct ynl_sock *ys,
if (req->_present.sb_index)
mnl_attr_put_u32(nlh, DEVLINK_ATTR_SB_INDEX, req->sb_index);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -3544,6 +3553,7 @@ void devlink_eswitch_set_req_free(struct devlink_eswitch_set_req *req)
int devlink_eswitch_set(struct ynl_sock *ys,
struct devlink_eswitch_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -3561,7 +3571,7 @@ int devlink_eswitch_set(struct ynl_sock *ys,
if (req->_present.eswitch_encap_mode)
mnl_attr_put_u8(nlh, DEVLINK_ATTR_ESWITCH_ENCAP_MODE, req->eswitch_encap_mode);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -3895,6 +3905,7 @@ devlink_dpipe_table_counters_set_req_free(struct devlink_dpipe_table_counters_se
int devlink_dpipe_table_counters_set(struct ynl_sock *ys,
struct devlink_dpipe_table_counters_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -3910,7 +3921,7 @@ int devlink_dpipe_table_counters_set(struct ynl_sock *ys,
if (req->_present.dpipe_table_counters_enabled)
mnl_attr_put_u8(nlh, DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED, req->dpipe_table_counters_enabled);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -3929,6 +3940,7 @@ void devlink_resource_set_req_free(struct devlink_resource_set_req *req)
int devlink_resource_set(struct ynl_sock *ys,
struct devlink_resource_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -3944,7 +3956,7 @@ int devlink_resource_set(struct ynl_sock *ys,
if (req->_present.resource_size)
mnl_attr_put_u64(nlh, DEVLINK_ATTR_RESOURCE_SIZE, req->resource_size);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -4319,6 +4331,7 @@ void devlink_param_set_req_free(struct devlink_param_set_req *req)
int devlink_param_set(struct ynl_sock *ys, struct devlink_param_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -4336,7 +4349,7 @@ int devlink_param_set(struct ynl_sock *ys, struct devlink_param_set_req *req)
if (req->_present.param_value_cmode)
mnl_attr_put_u8(nlh, DEVLINK_ATTR_PARAM_VALUE_CMODE, req->param_value_cmode);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -4631,6 +4644,7 @@ void devlink_region_del_req_free(struct devlink_region_del_req *req)
int devlink_region_del(struct ynl_sock *ys, struct devlink_region_del_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -4648,7 +4662,7 @@ int devlink_region_del(struct ynl_sock *ys, struct devlink_region_del_req *req)
if (req->_present.region_snapshot_id)
mnl_attr_put_u32(nlh, DEVLINK_ATTR_REGION_SNAPSHOT_ID, req->region_snapshot_id);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -4922,6 +4936,7 @@ void devlink_port_param_set_req_free(struct devlink_port_param_set_req *req)
int devlink_port_param_set(struct ynl_sock *ys,
struct devlink_port_param_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -4935,7 +4950,7 @@ int devlink_port_param_set(struct ynl_sock *ys,
if (req->_present.port_index)
mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -5360,6 +5375,7 @@ devlink_health_reporter_set_req_free(struct devlink_health_reporter_set_req *req
int devlink_health_reporter_set(struct ynl_sock *ys,
struct devlink_health_reporter_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -5381,7 +5397,7 @@ int devlink_health_reporter_set(struct ynl_sock *ys,
if (req->_present.health_reporter_auto_dump)
mnl_attr_put_u8(nlh, DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP, req->health_reporter_auto_dump);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -5402,6 +5418,7 @@ devlink_health_reporter_recover_req_free(struct devlink_health_reporter_recover_
int devlink_health_reporter_recover(struct ynl_sock *ys,
struct devlink_health_reporter_recover_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -5417,7 +5434,7 @@ int devlink_health_reporter_recover(struct ynl_sock *ys,
if (req->_present.health_reporter_name_len)
mnl_attr_put_strz(nlh, DEVLINK_ATTR_HEALTH_REPORTER_NAME, req->health_reporter_name);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -5438,6 +5455,7 @@ devlink_health_reporter_diagnose_req_free(struct devlink_health_reporter_diagnos
int devlink_health_reporter_diagnose(struct ynl_sock *ys,
struct devlink_health_reporter_diagnose_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -5453,7 +5471,7 @@ int devlink_health_reporter_diagnose(struct ynl_sock *ys,
if (req->_present.health_reporter_name_len)
mnl_attr_put_strz(nlh, DEVLINK_ATTR_HEALTH_REPORTER_NAME, req->health_reporter_name);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -5556,6 +5574,7 @@ devlink_health_reporter_dump_clear_req_free(struct devlink_health_reporter_dump_
int devlink_health_reporter_dump_clear(struct ynl_sock *ys,
struct devlink_health_reporter_dump_clear_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -5571,7 +5590,7 @@ int devlink_health_reporter_dump_clear(struct ynl_sock *ys,
if (req->_present.health_reporter_name_len)
mnl_attr_put_strz(nlh, DEVLINK_ATTR_HEALTH_REPORTER_NAME, req->health_reporter_name);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -5592,6 +5611,7 @@ void devlink_flash_update_req_free(struct devlink_flash_update_req *req)
int devlink_flash_update(struct ynl_sock *ys,
struct devlink_flash_update_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -5609,7 +5629,7 @@ int devlink_flash_update(struct ynl_sock *ys,
if (req->_present.flash_update_overwrite_mask)
mnl_attr_put(nlh, DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK, sizeof(struct nla_bitfield32), &req->flash_update_overwrite_mask);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -5780,6 +5800,7 @@ void devlink_trap_set_req_free(struct devlink_trap_set_req *req)
int devlink_trap_set(struct ynl_sock *ys, struct devlink_trap_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -5795,7 +5816,7 @@ int devlink_trap_set(struct ynl_sock *ys, struct devlink_trap_set_req *req)
if (req->_present.trap_action)
mnl_attr_put_u8(nlh, DEVLINK_ATTR_TRAP_ACTION, req->trap_action);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -5968,6 +5989,7 @@ void devlink_trap_group_set_req_free(struct devlink_trap_group_set_req *req)
int devlink_trap_group_set(struct ynl_sock *ys,
struct devlink_trap_group_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -5985,7 +6007,7 @@ int devlink_trap_group_set(struct ynl_sock *ys,
if (req->_present.trap_policer_id)
mnl_attr_put_u32(nlh, DEVLINK_ATTR_TRAP_POLICER_ID, req->trap_policer_id);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -6152,6 +6174,7 @@ devlink_trap_policer_set_req_free(struct devlink_trap_policer_set_req *req)
int devlink_trap_policer_set(struct ynl_sock *ys,
struct devlink_trap_policer_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -6169,7 +6192,7 @@ int devlink_trap_policer_set(struct ynl_sock *ys,
if (req->_present.trap_policer_burst)
mnl_attr_put_u64(nlh, DEVLINK_ATTR_TRAP_POLICER_BURST, req->trap_policer_burst);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -6190,6 +6213,7 @@ devlink_health_reporter_test_req_free(struct devlink_health_reporter_test_req *r
int devlink_health_reporter_test(struct ynl_sock *ys,
struct devlink_health_reporter_test_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -6205,7 +6229,7 @@ int devlink_health_reporter_test(struct ynl_sock *ys,
if (req->_present.health_reporter_name_len)
mnl_attr_put_strz(nlh, DEVLINK_ATTR_HEALTH_REPORTER_NAME, req->health_reporter_name);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -6384,6 +6408,7 @@ void devlink_rate_set_req_free(struct devlink_rate_set_req *req)
int devlink_rate_set(struct ynl_sock *ys, struct devlink_rate_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -6407,7 +6432,7 @@ int devlink_rate_set(struct ynl_sock *ys, struct devlink_rate_set_req *req)
if (req->_present.rate_parent_node_name_len)
mnl_attr_put_strz(nlh, DEVLINK_ATTR_RATE_PARENT_NODE_NAME, req->rate_parent_node_name);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -6427,6 +6452,7 @@ void devlink_rate_new_req_free(struct devlink_rate_new_req *req)
int devlink_rate_new(struct ynl_sock *ys, struct devlink_rate_new_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -6450,7 +6476,7 @@ int devlink_rate_new(struct ynl_sock *ys, struct devlink_rate_new_req *req)
if (req->_present.rate_parent_node_name_len)
mnl_attr_put_strz(nlh, DEVLINK_ATTR_RATE_PARENT_NODE_NAME, req->rate_parent_node_name);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -6469,6 +6495,7 @@ void devlink_rate_del_req_free(struct devlink_rate_del_req *req)
int devlink_rate_del(struct ynl_sock *ys, struct devlink_rate_del_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -6482,7 +6509,7 @@ int devlink_rate_del(struct ynl_sock *ys, struct devlink_rate_del_req *req)
if (req->_present.rate_node_name_len)
mnl_attr_put_strz(nlh, DEVLINK_ATTR_RATE_NODE_NAME, req->rate_node_name);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -6645,6 +6672,7 @@ void devlink_linecard_set_req_free(struct devlink_linecard_set_req *req)
int devlink_linecard_set(struct ynl_sock *ys,
struct devlink_linecard_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -6660,7 +6688,7 @@ int devlink_linecard_set(struct ynl_sock *ys,
if (req->_present.linecard_type_len)
mnl_attr_put_strz(nlh, DEVLINK_ATTR_LINECARD_TYPE, req->linecard_type);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -6810,6 +6838,7 @@ void devlink_selftests_run_req_free(struct devlink_selftests_run_req *req)
int devlink_selftests_run(struct ynl_sock *ys,
struct devlink_selftests_run_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -6823,7 +6852,7 @@ int devlink_selftests_run(struct ynl_sock *ys,
if (req->_present.selftests)
devlink_dl_selftest_id_put(nlh, DEVLINK_ATTR_SELFTESTS, &req->selftests);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
diff --git a/tools/net/ynl/generated/ethtool-user.c b/tools/net/ynl/generated/ethtool-user.c
index 74b883a14958..660435639e2b 100644
--- a/tools/net/ynl/generated/ethtool-user.c
+++ b/tools/net/ynl/generated/ethtool-user.c
@@ -1843,6 +1843,7 @@ void ethtool_linkinfo_set_req_free(struct ethtool_linkinfo_set_req *req)
int ethtool_linkinfo_set(struct ynl_sock *ys,
struct ethtool_linkinfo_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -1862,7 +1863,7 @@ int ethtool_linkinfo_set(struct ynl_sock *ys,
if (req->_present.transceiver)
mnl_attr_put_u8(nlh, ETHTOOL_A_LINKINFO_TRANSCEIVER, req->transceiver);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -2067,6 +2068,7 @@ void ethtool_linkmodes_set_req_free(struct ethtool_linkmodes_set_req *req)
int ethtool_linkmodes_set(struct ynl_sock *ys,
struct ethtool_linkmodes_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -2094,7 +2096,7 @@ int ethtool_linkmodes_set(struct ynl_sock *ys,
if (req->_present.rate_matching)
mnl_attr_put_u8(nlh, ETHTOOL_A_LINKMODES_RATE_MATCHING, req->rate_matching);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -2398,6 +2400,7 @@ void ethtool_debug_set_req_free(struct ethtool_debug_set_req *req)
int ethtool_debug_set(struct ynl_sock *ys, struct ethtool_debug_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -2409,7 +2412,7 @@ int ethtool_debug_set(struct ynl_sock *ys, struct ethtool_debug_set_req *req)
if (req->_present.msgmask)
ethtool_bitset_put(nlh, ETHTOOL_A_DEBUG_MSGMASK, &req->msgmask);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -2577,6 +2580,7 @@ void ethtool_wol_set_req_free(struct ethtool_wol_set_req *req)
int ethtool_wol_set(struct ynl_sock *ys, struct ethtool_wol_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -2590,7 +2594,7 @@ int ethtool_wol_set(struct ynl_sock *ys, struct ethtool_wol_set_req *req)
if (req->_present.sopass_len)
mnl_attr_put(nlh, ETHTOOL_A_WOL_SOPASS, req->_present.sopass_len, req->sopass);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -3045,6 +3049,7 @@ void ethtool_privflags_set_req_free(struct ethtool_privflags_set_req *req)
int ethtool_privflags_set(struct ynl_sock *ys,
struct ethtool_privflags_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -3056,7 +3061,7 @@ int ethtool_privflags_set(struct ynl_sock *ys,
if (req->_present.flags)
ethtool_bitset_put(nlh, ETHTOOL_A_PRIVFLAGS_FLAGS, &req->flags);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -3273,6 +3278,7 @@ void ethtool_rings_set_req_free(struct ethtool_rings_set_req *req)
int ethtool_rings_set(struct ynl_sock *ys, struct ethtool_rings_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -3312,7 +3318,7 @@ int ethtool_rings_set(struct ynl_sock *ys, struct ethtool_rings_set_req *req)
if (req->_present.tx_push_buf_len_max)
mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX, req->tx_push_buf_len_max);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -3495,6 +3501,7 @@ void ethtool_channels_set_req_free(struct ethtool_channels_set_req *req)
int ethtool_channels_set(struct ynl_sock *ys,
struct ethtool_channels_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -3520,7 +3527,7 @@ int ethtool_channels_set(struct ynl_sock *ys,
if (req->_present.combined_count)
mnl_attr_put_u32(nlh, ETHTOOL_A_CHANNELS_COMBINED_COUNT, req->combined_count);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -3798,6 +3805,7 @@ void ethtool_coalesce_set_req_free(struct ethtool_coalesce_set_req *req)
int ethtool_coalesce_set(struct ynl_sock *ys,
struct ethtool_coalesce_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -3861,7 +3869,7 @@ int ethtool_coalesce_set(struct ynl_sock *ys,
if (req->_present.tx_aggr_time_usecs)
mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS, req->tx_aggr_time_usecs);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -4036,6 +4044,7 @@ void ethtool_pause_set_req_free(struct ethtool_pause_set_req *req)
int ethtool_pause_set(struct ynl_sock *ys, struct ethtool_pause_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -4055,7 +4064,7 @@ int ethtool_pause_set(struct ynl_sock *ys, struct ethtool_pause_set_req *req)
if (req->_present.stats_src)
mnl_attr_put_u32(nlh, ETHTOOL_A_PAUSE_STATS_SRC, req->stats_src);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -4242,6 +4251,7 @@ void ethtool_eee_set_req_free(struct ethtool_eee_set_req *req)
int ethtool_eee_set(struct ynl_sock *ys, struct ethtool_eee_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -4263,7 +4273,7 @@ int ethtool_eee_set(struct ynl_sock *ys, struct ethtool_eee_set_req *req)
if (req->_present.tx_lpi_timer)
mnl_attr_put_u32(nlh, ETHTOOL_A_EEE_TX_LPI_TIMER, req->tx_lpi_timer);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -4437,6 +4447,7 @@ void ethtool_cable_test_act_req_free(struct ethtool_cable_test_act_req *req)
int ethtool_cable_test_act(struct ynl_sock *ys,
struct ethtool_cable_test_act_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -4446,7 +4457,7 @@ int ethtool_cable_test_act(struct ynl_sock *ys,
if (req->_present.header)
ethtool_header_put(nlh, ETHTOOL_A_CABLE_TEST_HEADER, &req->header);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -4465,6 +4476,7 @@ ethtool_cable_test_tdr_act_req_free(struct ethtool_cable_test_tdr_act_req *req)
int ethtool_cable_test_tdr_act(struct ynl_sock *ys,
struct ethtool_cable_test_tdr_act_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -4474,7 +4486,7 @@ int ethtool_cable_test_tdr_act(struct ynl_sock *ys,
if (req->_present.header)
ethtool_header_put(nlh, ETHTOOL_A_CABLE_TEST_TDR_HEADER, &req->header);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -4782,6 +4794,7 @@ void ethtool_fec_set_req_free(struct ethtool_fec_set_req *req)
int ethtool_fec_set(struct ynl_sock *ys, struct ethtool_fec_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -4799,7 +4812,7 @@ int ethtool_fec_set(struct ynl_sock *ys, struct ethtool_fec_set_req *req)
if (req->_present.stats)
ethtool_fec_stat_put(nlh, ETHTOOL_A_FEC_STATS, &req->stats);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -5235,6 +5248,7 @@ void ethtool_module_set_req_free(struct ethtool_module_set_req *req)
int ethtool_module_set(struct ynl_sock *ys, struct ethtool_module_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -5248,7 +5262,7 @@ int ethtool_module_set(struct ynl_sock *ys, struct ethtool_module_set_req *req)
if (req->_present.power_mode)
mnl_attr_put_u8(nlh, ETHTOOL_A_MODULE_POWER_MODE, req->power_mode);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -5397,6 +5411,7 @@ void ethtool_pse_set_req_free(struct ethtool_pse_set_req *req)
int ethtool_pse_set(struct ynl_sock *ys, struct ethtool_pse_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -5412,7 +5427,7 @@ int ethtool_pse_set(struct ynl_sock *ys, struct ethtool_pse_set_req *req)
if (req->_present.pw_d_status)
mnl_attr_put_u32(nlh, ETHTOOL_A_PODL_PSE_PW_D_STATUS, req->pw_d_status);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -5746,6 +5761,7 @@ void ethtool_plca_set_cfg_req_free(struct ethtool_plca_set_cfg_req *req)
int ethtool_plca_set_cfg(struct ynl_sock *ys,
struct ethtool_plca_set_cfg_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -5771,7 +5787,7 @@ int ethtool_plca_set_cfg(struct ynl_sock *ys,
if (req->_present.burst_tmr)
mnl_attr_put_u32(nlh, ETHTOOL_A_PLCA_BURST_TMR, req->burst_tmr);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -6124,6 +6140,7 @@ void ethtool_mm_set_req_free(struct ethtool_mm_set_req *req)
int ethtool_mm_set(struct ynl_sock *ys, struct ethtool_mm_set_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -6143,7 +6160,7 @@ int ethtool_mm_set(struct ynl_sock *ys, struct ethtool_mm_set_req *req)
if (req->_present.tx_min_frag_size)
mnl_attr_put_u32(nlh, ETHTOOL_A_MM_TX_MIN_FRAG_SIZE, req->tx_min_frag_size);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
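
Every hunk above makes the same mechanical change: set-style requests that expect no reply payload now pass a struct ynl_req_state into ynl_exec() instead of NULL, giving the library somewhere to hang parsed ACK/extack detail for the caller's socket. A minimal sketch of the new calling convention follows; the op, command, and request-struct names are hypothetical, and only ynl_gemsg_start_req()/ynl_exec() are taken from the hunks above:

    /* Sketch only: example_set_req and EXAMPLE_CMD_SET are hypothetical. */
    int example_set(struct ynl_sock *ys, struct example_set_req *req)
    {
    	/* Request state now exists even for reply-less ops, so a NACK's
    	 * extended-ack attributes can be decoded instead of discarded. */
    	struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
    	struct nlmsghdr *nlh;
    	int err;

    	nlh = ynl_gemsg_start_req(ys, ys->family_id, EXAMPLE_CMD_SET, 1);
    	/* ... mnl_attr_put*() calls for req->* fields go here ... */

    	err = ynl_exec(ys, nlh, &yrs);	/* previously: ynl_exec(ys, nlh, NULL) */
    	if (err < 0)
    		return -1;
    	return 0;
    }

The ynl-gen-c.py hunk further below makes this unconditional in the generator, so hand-editing the generated files is only needed until they are regenerated.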
diff --git a/tools/net/ynl/generated/fou-user.c b/tools/net/ynl/generated/fou-user.c
index 4271b5d43c58..f30bef23bc31 100644
--- a/tools/net/ynl/generated/fou-user.c
+++ b/tools/net/ynl/generated/fou-user.c
@@ -72,6 +72,7 @@ void fou_add_req_free(struct fou_add_req *req)
int fou_add(struct ynl_sock *ys, struct fou_add_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -99,7 +100,7 @@ int fou_add(struct ynl_sock *ys, struct fou_add_req *req)
if (req->_present.ifindex)
mnl_attr_put_u32(nlh, FOU_ATTR_IFINDEX, req->ifindex);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
@@ -117,6 +118,7 @@ void fou_del_req_free(struct fou_del_req *req)
int fou_del(struct ynl_sock *ys, struct fou_del_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -140,7 +142,7 @@ int fou_del(struct ynl_sock *ys, struct fou_del_req *req)
if (req->_present.peer_v6_len)
mnl_attr_put(nlh, FOU_ATTR_PEER_V6, req->_present.peer_v6_len, req->peer_v6);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
diff --git a/tools/net/ynl/generated/handshake-user.c b/tools/net/ynl/generated/handshake-user.c
index 7c67765daf90..6901f8462cca 100644
--- a/tools/net/ynl/generated/handshake-user.c
+++ b/tools/net/ynl/generated/handshake-user.c
@@ -295,6 +295,7 @@ void handshake_done_req_free(struct handshake_done_req *req)
int handshake_done(struct ynl_sock *ys, struct handshake_done_req *req)
{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
struct nlmsghdr *nlh;
int err;
@@ -308,7 +309,7 @@ int handshake_done(struct ynl_sock *ys, struct handshake_done_req *req)
for (unsigned int i = 0; i < req->n_remote_auth; i++)
mnl_attr_put_u32(nlh, HANDSHAKE_A_DONE_REMOTE_AUTH, req->remote_auth[i]);
- err = ynl_exec(ys, nlh, NULL);
+ err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
return -1;
diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py
index 3bd6b928c14f..8337aa6de25e 100755
--- a/tools/net/ynl/ynl-gen-c.py
+++ b/tools/net/ynl/ynl-gen-c.py
@@ -1709,14 +1709,14 @@ def print_req(ri):
ret_ok = '0'
ret_err = '-1'
direction = "request"
- local_vars = ['struct nlmsghdr *nlh;',
+ local_vars = ['struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };',
+ 'struct nlmsghdr *nlh;',
'int err;']
if 'reply' in ri.op[ri.op_mode]:
ret_ok = 'rsp'
ret_err = 'NULL'
- local_vars += [f'{type_name(ri, rdir(direction))} *rsp;',
- 'struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };']
+ local_vars += [f'{type_name(ri, rdir(direction))} *rsp;']
print_prototype(ri, direction, terminate=False)
ri.cw.block_start()
@@ -1732,7 +1732,6 @@ def print_req(ri):
attr.attr_put(ri, "req")
ri.cw.nl()
- parse_arg = "NULL"
if 'reply' in ri.op[ri.op_mode]:
ri.cw.p('rsp = calloc(1, sizeof(*rsp));')
ri.cw.p('yrs.yarg.data = rsp;')
@@ -1742,8 +1741,7 @@ def print_req(ri):
else:
ri.cw.p(f'yrs.rsp_cmd = {ri.op.rsp_value};')
ri.cw.nl()
- parse_arg = '&yrs'
- ri.cw.p(f"err = ynl_exec(ys, nlh, {parse_arg});")
+ ri.cw.p("err = ynl_exec(ys, nlh, &yrs);")
ri.cw.p('if (err < 0)')
if 'reply' in ri.op[ri.op_mode]:
ri.cw.p('goto err_free;')
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 1da7f4b91b4f..dc42de1785ce 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -1,3 +1,5 @@
+arch/arm64/tools/gen-sysreg.awk
+arch/arm64/tools/sysreg
tools/perf
tools/arch
tools/scripts
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index d88da787e815..058c9aecf608 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -434,6 +434,21 @@ export INSTALL SHELL_PATH
SHELL = $(SHELL_PATH)
+arm64_gen_sysreg_dir := $(srctree)/tools/arch/arm64/tools
+ifneq ($(OUTPUT),)
+ arm64_gen_sysreg_outdir := $(OUTPUT)
+else
+ arm64_gen_sysreg_outdir := $(CURDIR)
+endif
+
+arm64-sysreg-defs: FORCE
+ $(Q)$(MAKE) -C $(arm64_gen_sysreg_dir) O=$(arm64_gen_sysreg_outdir)
+
+arm64-sysreg-defs-clean:
+ $(call QUIET_CLEAN,arm64-sysreg-defs)
+ $(Q)$(MAKE) -C $(arm64_gen_sysreg_dir) O=$(arm64_gen_sysreg_outdir) \
+ clean > /dev/null
+
beauty_linux_dir := $(srctree)/tools/perf/trace/beauty/include/linux/
linux_uapi_dir := $(srctree)/tools/include/uapi/linux
asm_generic_uapi_dir := $(srctree)/tools/include/uapi/asm-generic
@@ -450,15 +465,6 @@ drm_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/drm_ioctl.sh
# Create output directory if not already present
_dummy := $(shell [ -d '$(beauty_ioctl_outdir)' ] || mkdir -p '$(beauty_ioctl_outdir)')
-arm64_gen_sysreg_dir := $(srctree)/tools/arch/arm64/tools
-
-arm64-sysreg-defs: FORCE
- $(Q)$(MAKE) -C $(arm64_gen_sysreg_dir)
-
-arm64-sysreg-defs-clean:
- $(call QUIET_CLEAN,arm64-sysreg-defs)
- $(Q)$(MAKE) -C $(arm64_gen_sysreg_dir) clean > /dev/null
-
$(drm_ioctl_array): $(drm_hdr_dir)/drm.h $(drm_hdr_dir)/i915_drm.h $(drm_ioctl_tbl)
$(Q)$(SHELL) '$(drm_ioctl_tbl)' $(drm_hdr_dir) > $@
diff --git a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
index 80be0e98ea0c..116ff501bf92 100644
--- a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
+++ b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
@@ -367,3 +367,7 @@
450 common set_mempolicy_home_node sys_set_mempolicy_home_node
451 n64 cachestat sys_cachestat
452 n64 fchmodat2 sys_fchmodat2
+453 n64 map_shadow_stack sys_map_shadow_stack
+454 n64 futex_wake sys_futex_wake
+455 n64 futex_wait sys_futex_wait
+456 n64 futex_requeue sys_futex_requeue
diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
index e1412519b4ad..7fab411378f2 100644
--- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
@@ -539,3 +539,7 @@
450 nospu set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat
452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_ni_syscall
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
index cc0bc144b661..86fec9b080f6 100644
--- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
@@ -455,3 +455,7 @@
450 common set_mempolicy_home_node sys_set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat sys_cachestat
452 common fchmodat2 sys_fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue sys_futex_requeue
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index 2a62eaf30d69..8cb8bf68721c 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -375,6 +375,9 @@
451 common cachestat sys_cachestat
452 common fchmodat2 sys_fchmodat2
453 64 map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
#
# Due to a historical design error, certain syscalls are numbered differently
diff --git a/tools/perf/builtin-kwork.c b/tools/perf/builtin-kwork.c
index f007a9b27065..0092b9b39611 100644
--- a/tools/perf/builtin-kwork.c
+++ b/tools/perf/builtin-kwork.c
@@ -1643,7 +1643,7 @@ static int top_print_work(struct perf_kwork *kwork __maybe_unused, struct kwork_
/*
* pid
*/
- ret += printf(" %*ld ", PRINT_PID_WIDTH, work->id);
+ ret += printf(" %*" PRIu64 " ", PRINT_PID_WIDTH, work->id);
/*
* tgid
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index a343823c8ddf..61c2c96cc070 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -434,6 +434,11 @@ static void json_print_metric(void *ps __maybe_unused, const char *group,
strbuf_release(&buf);
}
+static bool json_skip_duplicate_pmus(void *ps __maybe_unused)
+{
+ return false;
+}
+
static bool default_skip_duplicate_pmus(void *ps)
{
struct print_state *print_state = ps;
@@ -503,6 +508,7 @@ int cmd_list(int argc, const char **argv)
.print_end = json_print_end,
.print_event = json_print_event,
.print_metric = json_print_metric,
+ .skip_duplicate_pmus = json_skip_duplicate_pmus,
};
ps = &json_ps;
} else {
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json
index e2848a9d4848..afcdad58ef89 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/metrics.json
@@ -231,6 +231,7 @@
"MetricName": "slots_lost_misspeculation_fraction",
"MetricExpr": "100 * ((OP_SPEC - OP_RETIRED) / (CPU_CYCLES * #slots))",
"BriefDescription": "Fraction of slots lost due to misspeculation",
+ "DefaultMetricgroupName": "TopdownL1",
"MetricGroup": "Default;TopdownL1",
"ScaleUnit": "1percent of slots"
},
@@ -238,6 +239,7 @@
"MetricName": "retired_fraction",
"MetricExpr": "100 * (OP_RETIRED / (CPU_CYCLES * #slots))",
"BriefDescription": "Fraction of slots retiring, useful work",
+ "DefaultMetricgroupName": "TopdownL1",
"MetricGroup": "Default;TopdownL1",
"ScaleUnit": "1percent of slots"
},
diff --git a/tools/perf/trace/beauty/include/linux/socket.h b/tools/perf/trace/beauty/include/linux/socket.h
index 39b74d83c7c4..cfcb7e2c3813 100644
--- a/tools/perf/trace/beauty/include/linux/socket.h
+++ b/tools/perf/trace/beauty/include/linux/socket.h
@@ -383,6 +383,7 @@ struct ucred {
#define SOL_MPTCP 284
#define SOL_MCTP 285
#define SOL_SMC 286
+#define SOL_VSOCK 287
/* IPX options */
#define IPX_TYPE 1
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index fb661c48992f..988473bf907a 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -347,7 +347,7 @@ CFLAGS_rbtree.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ET
CFLAGS_libstring.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
CFLAGS_hweight.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
CFLAGS_header.o += -include $(OUTPUT)PERF-VERSION-FILE
-CFLAGS_arm-spe.o += -I$(srctree)/tools/arch/arm64/include/ -I$(srctree)/tools/arch/arm64/include/generated/
+CFLAGS_arm-spe.o += -I$(srctree)/tools/arch/arm64/include/ -I$(OUTPUT)arch/arm64/include/generated/
$(OUTPUT)util/argv_split.o: ../lib/argv_split.c FORCE
$(call rule_mkdir)
diff --git a/tools/perf/util/bpf_lock_contention.c b/tools/perf/util/bpf_lock_contention.c
index e105245eb905..f1716c089c99 100644
--- a/tools/perf/util/bpf_lock_contention.c
+++ b/tools/perf/util/bpf_lock_contention.c
@@ -12,6 +12,7 @@
#include <linux/zalloc.h>
#include <linux/string.h>
#include <bpf/bpf.h>
+#include <inttypes.h>
#include "bpf_skel/lock_contention.skel.h"
#include "bpf_skel/lock_data.h"
@@ -250,7 +251,7 @@ static const char *lock_contention_get_name(struct lock_contention *con,
if (cgrp)
return cgrp->name;
- snprintf(name_buf, sizeof(name_buf), "cgroup:%lu", cgrp_id);
+ snprintf(name_buf, sizeof(name_buf), "cgroup:%" PRIu64 "", cgrp_id);
return name_buf;
}
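
The two printf fixes above (builtin-kwork.c and bpf_lock_contention.c) are the same portability repair: a 64-bit value was printed with %ld/%lu, which is only correct on ABIs where the 64-bit type is long. The PRIu64 macro from <inttypes.h> expands to the right length modifier everywhere and still composes with a field width. A self-contained illustration:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint64_t id = 42;

    	/* Portable: PRIu64 expands to "lu" or "llu" as the ABI requires,
    	 * and the '*' field width still composes with it. */
    	printf(" %*" PRIu64 " \n", 10, id);

    	/* Non-portable: "%lu" is wrong wherever uint64_t is long long. */
    	return 0;
    }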
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 0484736d9fe4..ca3e0404f187 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -225,7 +225,7 @@ static struct metric *metric__new(const struct pmu_metric *pm,
m->pmu = pm->pmu ?: "cpu";
m->metric_name = pm->metric_name;
- m->default_metricgroup_name = pm->default_metricgroup_name;
+ m->default_metricgroup_name = pm->default_metricgroup_name ?: "";
m->modifier = NULL;
if (modifier) {
m->modifier = strdup(modifier);
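
The metricgroup.c change leans on GCC's conditional with omitted middle operand: `a ?: b` yields `a` when it is non-NULL (evaluating it once) and `b` otherwise. The ampereone metrics.json hunk earlier adds "DefaultMetricgroupName" for the metrics that have one; this gives every other metric a safe empty default so later string helpers never see NULL. Spelled out in plain C, as a sketch:

    #include <stdio.h>

    int main(void)
    {
    	const char *from_json = NULL;	/* stands in for pm->default_metricgroup_name */
    	const char *name;

    	/* GNU extension used in the hunk: `a ?: b` == `a ? a : b`,
    	 * with a evaluated once. Plain C equivalent: */
    	name = from_json ? from_json : "";

    	printf("group: \"%s\"\n", name);	/* safe even when the field was absent */
    	return 0;
    }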
diff --git a/tools/testing/nvdimm/test/ndtest.c b/tools/testing/nvdimm/test/ndtest.c
index fd26189d53be..b8419f460368 100644
--- a/tools/testing/nvdimm/test/ndtest.c
+++ b/tools/testing/nvdimm/test/ndtest.c
@@ -924,7 +924,7 @@ static __init int ndtest_init(void)
nfit_test_setup(ndtest_resource_lookup, NULL);
- rc = class_regster(&ndtest_dimm_class);
+ rc = class_register(&ndtest_dimm_class);
if (rc)
goto err_register;
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
index a934d430c20c..a92807bfcd13 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
@@ -1337,7 +1337,8 @@ static void test_redir(struct test_sockmap_listen *skel, struct bpf_map *map,
}
static void pairs_redir_to_connected(int cli0, int peer0, int cli1, int peer1,
- int sock_mapfd, int verd_mapfd, enum redir_mode mode)
+ int sock_mapfd, int nop_mapfd,
+ int verd_mapfd, enum redir_mode mode)
{
const char *log_prefix = redir_mode_str(mode);
unsigned int pass;
@@ -1351,6 +1352,12 @@ static void pairs_redir_to_connected(int cli0, int peer0, int cli1, int peer1,
if (err)
return;
+ if (nop_mapfd >= 0) {
+ err = add_to_sockmap(nop_mapfd, cli0, cli1);
+ if (err)
+ return;
+ }
+
n = write(cli1, "a", 1);
if (n < 0)
FAIL_ERRNO("%s: write", log_prefix);
@@ -1387,7 +1394,7 @@ static void unix_redir_to_connected(int sotype, int sock_mapfd,
goto close0;
c1 = sfd[0], p1 = sfd[1];
- pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, verd_mapfd, mode);
+ pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, -1, verd_mapfd, mode);
xclose(c1);
xclose(p1);
@@ -1677,7 +1684,7 @@ static void udp_redir_to_connected(int family, int sock_mapfd, int verd_mapfd,
if (err)
goto close_cli0;
- pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, verd_mapfd, mode);
+ pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, -1, verd_mapfd, mode);
xclose(c1);
xclose(p1);
@@ -1735,7 +1742,7 @@ static void inet_unix_redir_to_connected(int family, int type, int sock_mapfd,
if (err)
goto close;
- pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, verd_mapfd, mode);
+ pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, -1, verd_mapfd, mode);
xclose(c1);
xclose(p1);
@@ -1770,8 +1777,10 @@ static void inet_unix_skb_redir_to_connected(struct test_sockmap_listen *skel,
xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT);
}
-static void unix_inet_redir_to_connected(int family, int type, int sock_mapfd,
- int verd_mapfd, enum redir_mode mode)
+static void unix_inet_redir_to_connected(int family, int type,
+ int sock_mapfd, int nop_mapfd,
+ int verd_mapfd,
+ enum redir_mode mode)
{
int c0, c1, p0, p1;
int sfd[2];
@@ -1785,7 +1794,8 @@ static void unix_inet_redir_to_connected(int family, int type, int sock_mapfd,
goto close_cli0;
c1 = sfd[0], p1 = sfd[1];
- pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, verd_mapfd, mode);
+ pairs_redir_to_connected(c0, p0, c1, p1,
+ sock_mapfd, nop_mapfd, verd_mapfd, mode);
xclose(c1);
xclose(p1);
@@ -1799,6 +1809,7 @@ static void unix_inet_skb_redir_to_connected(struct test_sockmap_listen *skel,
struct bpf_map *inner_map, int family)
{
int verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
+ int nop_map = bpf_map__fd(skel->maps.nop_map);
int verdict_map = bpf_map__fd(skel->maps.verdict_map);
int sock_map = bpf_map__fd(inner_map);
int err;
@@ -1808,14 +1819,32 @@ static void unix_inet_skb_redir_to_connected(struct test_sockmap_listen *skel,
return;
skel->bss->test_ingress = false;
- unix_inet_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map,
+ unix_inet_redir_to_connected(family, SOCK_DGRAM,
+ sock_map, -1, verdict_map,
REDIR_EGRESS);
- unix_inet_redir_to_connected(family, SOCK_STREAM, sock_map, verdict_map,
+ unix_inet_redir_to_connected(family, SOCK_STREAM,
+ sock_map, -1, verdict_map,
+ REDIR_EGRESS);
+
+ unix_inet_redir_to_connected(family, SOCK_DGRAM,
+ sock_map, nop_map, verdict_map,
+ REDIR_EGRESS);
+ unix_inet_redir_to_connected(family, SOCK_STREAM,
+ sock_map, nop_map, verdict_map,
REDIR_EGRESS);
skel->bss->test_ingress = true;
- unix_inet_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map,
+ unix_inet_redir_to_connected(family, SOCK_DGRAM,
+ sock_map, -1, verdict_map,
+ REDIR_INGRESS);
+ unix_inet_redir_to_connected(family, SOCK_STREAM,
+ sock_map, -1, verdict_map,
+ REDIR_INGRESS);
+
+ unix_inet_redir_to_connected(family, SOCK_DGRAM,
+ sock_map, nop_map, verdict_map,
REDIR_INGRESS);
- unix_inet_redir_to_connected(family, SOCK_STREAM, sock_map, verdict_map,
+ unix_inet_redir_to_connected(family, SOCK_STREAM,
+ sock_map, nop_map, verdict_map,
REDIR_INGRESS);
xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT);
diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
index fc6b2954e8f5..59993fc9c0d7 100644
--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
+#include <unistd.h>
#include <test_progs.h>
#include <network_helpers.h>
+#include "tailcall_poke.skel.h"
+
/* test_tailcall_1 checks basic functionality by patching multiple locations
* in a single program for a single tail call slot with nop->jmp, jmp->nop
@@ -1105,6 +1108,85 @@ out:
bpf_object__close(tgt_obj);
}
+#define JMP_TABLE "/sys/fs/bpf/jmp_table"
+
+static int poke_thread_exit;
+
+static void *poke_update(void *arg)
+{
+ __u32 zero = 0, prog1_fd, prog2_fd, map_fd;
+ struct tailcall_poke *call = arg;
+
+ map_fd = bpf_map__fd(call->maps.jmp_table);
+ prog1_fd = bpf_program__fd(call->progs.call1);
+ prog2_fd = bpf_program__fd(call->progs.call2);
+
+ while (!poke_thread_exit) {
+ bpf_map_update_elem(map_fd, &zero, &prog1_fd, BPF_ANY);
+ bpf_map_update_elem(map_fd, &zero, &prog2_fd, BPF_ANY);
+ }
+
+ return NULL;
+}
+
+/*
+ * We are trying to hit a prog array update during another program load
+ * that shares the same prog array map.
+ *
+ * For that we share the jmp_table map between two skeleton instances
+ * by pinning the jmp_table to the same path. Then the first skeleton
+ * instance periodically updates the jmp_table in the 'poke update'
+ * thread while we load the second skeleton instance in the main thread.
+ */
+static void test_tailcall_poke(void)
+{
+ struct tailcall_poke *call, *test;
+ int err, cnt = 10;
+ pthread_t thread;
+
+ unlink(JMP_TABLE);
+
+ call = tailcall_poke__open_and_load();
+ if (!ASSERT_OK_PTR(call, "tailcall_poke__open"))
+ return;
+
+ err = bpf_map__pin(call->maps.jmp_table, JMP_TABLE);
+ if (!ASSERT_OK(err, "bpf_map__pin"))
+ goto out;
+
+ err = pthread_create(&thread, NULL, poke_update, call);
+ if (!ASSERT_OK(err, "new toggler"))
+ goto out;
+
+ while (cnt--) {
+ test = tailcall_poke__open();
+ if (!ASSERT_OK_PTR(test, "tailcall_poke__open"))
+ break;
+
+ err = bpf_map__set_pin_path(test->maps.jmp_table, JMP_TABLE);
+ if (!ASSERT_OK(err, "bpf_map__pin")) {
+ tailcall_poke__destroy(test);
+ break;
+ }
+
+ bpf_program__set_autoload(test->progs.test, true);
+ bpf_program__set_autoload(test->progs.call1, false);
+ bpf_program__set_autoload(test->progs.call2, false);
+
+ err = tailcall_poke__load(test);
+ tailcall_poke__destroy(test);
+ if (!ASSERT_OK(err, "tailcall_poke__load"))
+ break;
+ }
+
+ poke_thread_exit = 1;
+ ASSERT_OK(pthread_join(thread, NULL), "pthread_join");
+
+out:
+ bpf_map__unpin(call->maps.jmp_table, JMP_TABLE);
+ tailcall_poke__destroy(call);
+}
+
void test_tailcalls(void)
{
if (test__start_subtest("tailcall_1"))
@@ -1139,4 +1221,6 @@ void test_tailcalls(void)
test_tailcall_bpf2bpf_fentry_fexit();
if (test__start_subtest("tailcall_bpf2bpf_fentry_entry"))
test_tailcall_bpf2bpf_fentry_entry();
+ if (test__start_subtest("tailcall_poke"))
+ test_tailcall_poke();
}
diff --git a/tools/testing/selftests/bpf/progs/tailcall_poke.c b/tools/testing/selftests/bpf/progs/tailcall_poke.c
new file mode 100644
index 000000000000..c78b94b75e83
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_poke.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+SEC("?fentry/bpf_fentry_test1")
+int BPF_PROG(test, int a)
+{
+ bpf_tail_call_static(ctx, &jmp_table, 0);
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(call1, int a)
+{
+ return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(call2, int a)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
index 464d35bd57c7..b7250eb9c30c 100644
--- a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
@@ -15,6 +15,13 @@ struct {
} sock_map SEC(".maps");
struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 2);
+ __type(key, __u32);
+ __type(value, __u64);
+} nop_map SEC(".maps");
+
+struct {
__uint(type, BPF_MAP_TYPE_SOCKHASH);
__uint(max_entries, 2);
__type(key, __u32);
diff --git a/tools/testing/selftests/iommu/iommufd_utils.h b/tools/testing/selftests/iommu/iommufd_utils.h
index 050e9751321c..ad9202335656 100644
--- a/tools/testing/selftests/iommu/iommufd_utils.h
+++ b/tools/testing/selftests/iommu/iommufd_utils.h
@@ -293,15 +293,13 @@ static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length,
__u64 bitmap_size, __u32 flags,
struct __test_metadata *_metadata)
{
- unsigned long i, count, nbits = bitmap_size * BITS_PER_BYTE;
+ unsigned long i, nbits = bitmap_size * BITS_PER_BYTE;
unsigned long nr = nbits / 2;
__u64 out_dirty = 0;
/* Mark all even bits as dirty in the mock domain */
- for (count = 0, i = 0; i < nbits; count += !(i % 2), i++)
- if (!(i % 2))
- set_bit(i, (unsigned long *)bitmap);
- ASSERT_EQ(nr, count);
+ for (i = 0; i < nbits; i += 2)
+ set_bit(i, (unsigned long *)bitmap);
test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size,
bitmap, &out_dirty);
@@ -311,9 +309,10 @@ static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length,
memset(bitmap, 0, bitmap_size);
test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
flags);
- for (count = 0, i = 0; i < nbits; count += !(i % 2), i++)
+ /* Beware ASSERT_EQ() is two statements -- braces are not redundant! */
+ for (i = 0; i < nbits; i++) {
ASSERT_EQ(!(i % 2), test_bit(i, (unsigned long *)bitmap));
- ASSERT_EQ(count, out_dirty);
+ }
memset(bitmap, 0, bitmap_size);
test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
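
The comment added above ("ASSERT_EQ() is two statements") is worth unpacking: kselftest's harness asserts expand to more than one statement, so an unbraced for body binds only the first of them and silently runs the rest once, after the loop. A reduced illustration of the hazard, using a hypothetical macro of the same shape:

    #include <stdio.h>

    /* Hypothetical macro shaped like the harness asserts: it expands to
     * two statements rather than one do { ... } while (0) block. */
    #define CHECK_EQ(a, b) \
    	if ((a) != (b)) printf("mismatch\n"); \
    	printf("checked\n")

    int main(void)
    {
    	int i;

    	for (i = 0; i < 3; i++)
    		CHECK_EQ(i % 2, 0);	/* BUG: only the if binds to the loop;
    					 * "checked" prints once, not 3 times */

    	for (i = 0; i < 3; i++) {
    		CHECK_EQ(i % 2, 0);	/* braces keep both statements inside */
    	}
    	return 0;
    }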
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index a5963ab9215b..963435959a92 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -18,12 +18,13 @@ else
endif
ifeq ($(ARCH),arm64)
-arm64_tools_dir := $(top_srcdir)/tools/arch/arm64/tools/
+tools_dir := $(top_srcdir)/tools
+arm64_tools_dir := $(tools_dir)/arch/arm64/tools/
GEN_HDRS := $(top_srcdir)/tools/arch/arm64/include/generated/
CFLAGS += -I$(GEN_HDRS)
$(GEN_HDRS): $(wildcard $(arm64_tools_dir)/*)
- $(MAKE) -C $(arm64_tools_dir)
+ $(MAKE) -C $(arm64_tools_dir) O=$(tools_dir)
endif
LIBKVM += lib/assert.c
@@ -223,7 +224,7 @@ else
LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
endif
CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
- -Wno-gnu-variable-sized-type-not-at-end -MD\
+ -Wno-gnu-variable-sized-type-not-at-end -MD -MP \
-fno-builtin-memcmp -fno-builtin-memcpy -fno-builtin-memset \
-fno-builtin-strnlen \
-fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
diff --git a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
index 18ac5c1952a3..83e25bccc139 100644
--- a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
+++ b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
@@ -259,7 +259,7 @@ int main(int argc, char **argv)
__TEST_REQUIRE(token == MAGIC_TOKEN,
"This test must be run with the magic token %d.\n"
"This is done by nx_huge_pages_test.sh, which\n"
- "also handles environment setup for the test.");
+ "also handles environment setup for the test.", MAGIC_TOKEN);
run_test(reclaim_period_ms, false, reboot_permissions);
run_test(reclaim_period_ms, true, reboot_permissions);
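
The nx_huge_pages_test.c fix supplies the argument that the format string's %d had been silently missing; calling a printf-family function with more conversions than arguments reads an indeterminate vararg, which is undefined behavior and exactly what -Wformat exists to catch. Reduced to its essentials:

    #include <stdio.h>

    int main(void)
    {
    	int magic = 0x1234;

    	/* Broken shape (as before the fix): %d has no matching argument,
    	 * so printf reads an indeterminate value, i.e. undefined behavior:
    	 *	printf("run with the magic token %d\n");
    	 */

    	/* Fixed shape: every conversion has an argument. */
    	printf("run with the magic token %d\n", magic);
    	return 0;
    }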
diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile
index 78dfec8bc676..dede0bcf97a3 100644
--- a/tools/testing/selftests/mm/Makefile
+++ b/tools/testing/selftests/mm/Makefile
@@ -60,7 +60,7 @@ TEST_GEN_FILES += mrelease_test
TEST_GEN_FILES += mremap_dontunmap
TEST_GEN_FILES += mremap_test
TEST_GEN_FILES += on-fault-limit
-TEST_GEN_PROGS += pagemap_ioctl
+TEST_GEN_FILES += pagemap_ioctl
TEST_GEN_FILES += thuge-gen
TEST_GEN_FILES += transhuge-stress
TEST_GEN_FILES += uffd-stress
@@ -72,7 +72,7 @@ TEST_GEN_FILES += mdwe_test
TEST_GEN_FILES += hugetlb_fault_after_madv
ifneq ($(ARCH),arm64)
-TEST_GEN_PROGS += soft-dirty
+TEST_GEN_FILES += soft-dirty
endif
ifeq ($(ARCH),x86_64)
diff --git a/tools/testing/selftests/mm/pagemap_ioctl.c b/tools/testing/selftests/mm/pagemap_ioctl.c
index befab43719ba..d59517ed3d48 100644
--- a/tools/testing/selftests/mm/pagemap_ioctl.c
+++ b/tools/testing/selftests/mm/pagemap_ioctl.c
@@ -36,6 +36,7 @@ int pagemap_fd;
int uffd;
int page_size;
int hpage_size;
+const char *progname;
#define LEN(region) ((region.end - region.start)/page_size)
@@ -1149,11 +1150,11 @@ int sanity_tests(void)
munmap(mem, mem_size);
/* 9. Memory mapped file */
- fd = open(__FILE__, O_RDONLY);
+ fd = open(progname, O_RDONLY);
if (fd < 0)
ksft_exit_fail_msg("%s Memory mapped file\n", __func__);
- ret = stat(__FILE__, &sbuf);
+ ret = stat(progname, &sbuf);
if (ret < 0)
ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
@@ -1472,12 +1473,14 @@ static void transact_test(int page_size)
extra_thread_faults);
}
-int main(void)
+int main(int argc, char *argv[])
{
int mem_size, shmid, buf_size, fd, i, ret;
char *mem, *map, *fmem;
struct stat sbuf;
+ progname = argv[0];
+
ksft_print_header();
if (init_uffd())
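
The pagemap_ioctl change swaps __FILE__, which is a compile-time path to the .c source and usually absent on the machine running the test, for argv[0], the path the binary was actually invoked as. The shape of the fix, as a standalone sketch:

    #include <fcntl.h>
    #include <stdio.h>

    static const char *progname;

    int main(int argc, char *argv[])
    {
    	int fd;

    	progname = argv[0];	/* exists at runtime, unlike __FILE__ */

    	fd = open(progname, O_RDONLY);	/* map the running binary itself */
    	if (fd < 0)
    		perror("open");
    	return 0;
    }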
diff --git a/tools/testing/selftests/net/af_unix/diag_uid.c b/tools/testing/selftests/net/af_unix/diag_uid.c
index 5b88f7129fea..79a3dd75590e 100644
--- a/tools/testing/selftests/net/af_unix/diag_uid.c
+++ b/tools/testing/selftests/net/af_unix/diag_uid.c
@@ -148,7 +148,6 @@ void receive_response(struct __test_metadata *_metadata,
.msg_iov = &iov,
.msg_iovlen = 1
};
- struct unix_diag_req *udr;
struct nlmsghdr *nlh;
int ret;
diff --git a/tools/testing/selftests/net/cmsg_sender.c b/tools/testing/selftests/net/cmsg_sender.c
index 24b21b15ed3f..6ff3e732f449 100644
--- a/tools/testing/selftests/net/cmsg_sender.c
+++ b/tools/testing/selftests/net/cmsg_sender.c
@@ -416,9 +416,9 @@ int main(int argc, char *argv[])
{
struct addrinfo hints, *ai;
struct iovec iov[1];
+ unsigned char *buf;
struct msghdr msg;
char cbuf[1024];
- char *buf;
int err;
int fd;
diff --git a/tools/testing/selftests/net/ipsec.c b/tools/testing/selftests/net/ipsec.c
index 9a8229abfa02..be4a30a0d02a 100644
--- a/tools/testing/selftests/net/ipsec.c
+++ b/tools/testing/selftests/net/ipsec.c
@@ -2263,7 +2263,7 @@ static int check_results(void)
int main(int argc, char **argv)
{
- unsigned int nr_process = 1;
+ long nr_process = 1;
int route_sock = -1, ret = KSFT_SKIP;
int test_desc_fd[2];
uint32_t route_seq;
@@ -2284,7 +2284,7 @@ int main(int argc, char **argv)
exit_usage(argv);
}
- if (nr_process > MAX_PROCESSES || !nr_process) {
+ if (nr_process > MAX_PROCESSES || nr_process < 1) {
printk("nr_process should be between [1; %u]",
MAX_PROCESSES);
exit_usage(argv);
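
Switching nr_process from unsigned int to long fixes the range check's blind spot: a negative command-line value parsed into an unsigned variable wraps to a huge positive number, so neither !nr_process nor an upper-bound test rejects it. A reduced sketch, assuming strtol()-style parsing (the bound below is illustrative, not the test's real MAX_PROCESSES):

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_PROCESSES 32	/* illustrative bound */

    int main(int argc, char **argv)
    {
    	long nr_process = 1;

    	if (argc > 1)
    		nr_process = strtol(argv[1], NULL, 10);

    	/* Signed check catches "-3"; an unsigned variable would have
    	 * wrapped it to a huge value and sailed past !nr_process. */
    	if (nr_process > MAX_PROCESSES || nr_process < 1) {
    		fprintf(stderr, "nr_process should be between [1; %d]\n",
    			MAX_PROCESSES);
    		return 1;
    	}
    	printf("spawning %ld processes\n", nr_process);
    	return 0;
    }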
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
index c7f9ebeebc2c..d2043ec3bf6d 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
@@ -18,6 +18,7 @@
#include <sys/ioctl.h>
#include <sys/poll.h>
+#include <sys/random.h>
#include <sys/sendfile.h>
#include <sys/stat.h>
#include <sys/socket.h>
@@ -1125,15 +1126,11 @@ again:
static void init_rng(void)
{
- int fd = open("/dev/urandom", O_RDONLY);
unsigned int foo;
- if (fd > 0) {
- int ret = read(fd, &foo, sizeof(foo));
-
- if (ret < 0)
- srand(fd + foo);
- close(fd);
+ if (getrandom(&foo, sizeof(foo), 0) == -1) {
+ perror("getrandom");
+ exit(1);
}
srand(foo);
diff --git a/tools/testing/selftests/net/mptcp/mptcp_inq.c b/tools/testing/selftests/net/mptcp/mptcp_inq.c
index 8672d898f8cd..218aac467321 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_inq.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_inq.c
@@ -18,6 +18,7 @@
#include <time.h>
#include <sys/ioctl.h>
+#include <sys/random.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/wait.h>
@@ -519,15 +520,11 @@ static int client(int unixfd)
static void init_rng(void)
{
- int fd = open("/dev/urandom", O_RDONLY);
unsigned int foo;
- if (fd > 0) {
- int ret = read(fd, &foo, sizeof(foo));
-
- if (ret < 0)
- srand(fd + foo);
- close(fd);
+ if (getrandom(&foo, sizeof(foo), 0) == -1) {
+ perror("getrandom");
+ exit(1);
}
srand(foo);
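
Both mptcp selftests replace the open/read/close dance on /dev/urandom with getrandom(2): no fd to leak, no partial-read bookkeeping for a 4-byte seed, and a hard failure instead of the old fallback, which seeded from the fd number plus an uninitialized value when the read failed. The new pattern as a standalone program:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/random.h>

    static void init_rng(void)
    {
    	unsigned int seed;

    	/* getrandom() blocks only until the kernel entropy pool is
    	 * initialized; flags = 0 is the right default for seeding. */
    	if (getrandom(&seed, sizeof(seed), 0) == -1) {
    		perror("getrandom");
    		exit(1);
    	}
    	srand(seed);
    }

    int main(void)
    {
    	init_rng();
    	printf("%d\n", rand());
    	return 0;
    }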
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 486800a7024b..3b1b9e8dd70c 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -115,8 +115,6 @@ EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
static const struct file_operations stat_fops_per_vm;
-static struct file_operations kvm_chardev_ops;
-
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
@@ -1157,9 +1155,6 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
if (!kvm)
return ERR_PTR(-ENOMEM);
- /* KVM is pinned via open("/dev/kvm"), the fd passed to this ioctl(). */
- __module_get(kvm_chardev_ops.owner);
-
KVM_MMU_LOCK_INIT(kvm);
mmgrab(current->mm);
kvm->mm = current->mm;
@@ -1279,7 +1274,6 @@ out_err_no_irq_srcu:
out_err_no_srcu:
kvm_arch_free_vm(kvm);
mmdrop(current->mm);
- module_put(kvm_chardev_ops.owner);
return ERR_PTR(r);
}
@@ -1348,7 +1342,6 @@ static void kvm_destroy_vm(struct kvm *kvm)
preempt_notifier_dec();
hardware_disable_all();
mmdrop(mm);
- module_put(kvm_chardev_ops.owner);
}
void kvm_get_kvm(struct kvm *kvm)
@@ -3887,7 +3880,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
return 0;
}
-static const struct file_operations kvm_vcpu_fops = {
+static struct file_operations kvm_vcpu_fops = {
.release = kvm_vcpu_release,
.unlocked_ioctl = kvm_vcpu_ioctl,
.mmap = kvm_vcpu_mmap,
@@ -4081,6 +4074,7 @@ static int kvm_vcpu_stats_release(struct inode *inode, struct file *file)
}
static const struct file_operations kvm_vcpu_stats_fops = {
+ .owner = THIS_MODULE,
.read = kvm_vcpu_stats_read,
.release = kvm_vcpu_stats_release,
.llseek = noop_llseek,
@@ -4431,7 +4425,7 @@ static int kvm_device_release(struct inode *inode, struct file *filp)
return 0;
}
-static const struct file_operations kvm_device_fops = {
+static struct file_operations kvm_device_fops = {
.unlocked_ioctl = kvm_device_ioctl,
.release = kvm_device_release,
KVM_COMPAT(kvm_device_ioctl),
@@ -4759,6 +4753,7 @@ static int kvm_vm_stats_release(struct inode *inode, struct file *file)
}
static const struct file_operations kvm_vm_stats_fops = {
+ .owner = THIS_MODULE,
.read = kvm_vm_stats_read,
.release = kvm_vm_stats_release,
.llseek = noop_llseek,
@@ -5060,7 +5055,7 @@ static long kvm_vm_compat_ioctl(struct file *filp,
}
#endif
-static const struct file_operations kvm_vm_fops = {
+static struct file_operations kvm_vm_fops = {
.release = kvm_vm_release,
.unlocked_ioctl = kvm_vm_ioctl,
.llseek = noop_llseek,
@@ -6095,6 +6090,9 @@ int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
goto err_async_pf;
kvm_chardev_ops.owner = module;
+ kvm_vm_fops.owner = module;
+ kvm_vcpu_fops.owner = module;
+ kvm_device_fops.owner = module;
kvm_preempt_ops.sched_in = kvm_sched_in;
kvm_preempt_ops.sched_out = kvm_sched_out;
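
The kvm_main.c changes above replace manual __module_get()/module_put() bookkeeping with the kernel's own mechanism: when file_operations.owner is set, opening a file with those fops takes a module reference and the final fput() drops it, so any live VM/vCPU/device/stats fd pins kvm.ko (or the arch module passed to kvm_init()) automatically. That is also why the structs lose const (the owner is patched in at init time) and why the stats fops gain .owner = THIS_MODULE. A reduced sketch of the idiom for an out-of-tree character device, assuming standard miscdevice plumbing (the device name is hypothetical):

    #include <linux/fs.h>
    #include <linux/miscdevice.h>
    #include <linux/module.h>

    static int demo_open(struct inode *inode, struct file *filp)
    {
    	return 0;
    }

    /* Non-const so .owner could also be filled in at runtime, as kvm_init()
     * now does for kvm_vm_fops/kvm_vcpu_fops/kvm_device_fops. */
    static struct file_operations demo_fops = {
    	.owner = THIS_MODULE,	/* each open fd holds a module reference */
    	.open  = demo_open,
    };

    static struct miscdevice demo_dev = {
    	.minor = MISC_DYNAMIC_MINOR,
    	.name  = "demo",	/* hypothetical device name */
    	.fops  = &demo_fops,
    };

    static int __init demo_init(void)
    {
    	return misc_register(&demo_dev);
    }

    static void __exit demo_exit(void)
    {
    	misc_deregister(&demo_dev);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");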