-rw-r--r--Documentation/accel/qaic/qaic.rst6
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt4
-rw-r--r--Documentation/arch/arm64/silicon-errata.rst18
-rw-r--r--Documentation/devicetree/bindings/ata/rockchip,dwc-ahci.yaml3
-rw-r--r--Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml21
-rw-r--r--Documentation/devicetree/bindings/display/panel/boe,th101mb31ig002-28a.yaml21
-rw-r--r--Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml17
-rw-r--r--Documentation/devicetree/bindings/display/panel/ilitek,ili9806e.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml1
-rw-r--r--Documentation/driver-api/thermal/sysfs-api.rst65
-rw-r--r--Documentation/gpu/todo.rst35
-rw-r--r--Documentation/netlink/specs/ethtool.yaml2
-rw-r--r--Documentation/networking/ethtool-netlink.rst1
-rw-r--r--Documentation/userspace-api/media/v4l/pixfmt-yuv-luma.rst4
-rw-r--r--Documentation/virt/kvm/api.rst8
-rw-r--r--MAINTAINERS13
-rw-r--r--Makefile2
-rw-r--r--arch/alpha/include/asm/io.h4
-rw-r--r--arch/arm/Kconfig4
-rw-r--r--arch/arm/boot/compressed/Makefile1
-rw-r--r--arch/arm/boot/compressed/vmlinux.lds.S2
-rw-r--r--arch/arm/boot/dts/arm/versatile-ab.dts2
-rw-r--r--arch/arm/include/asm/stacktrace.h7
-rw-r--r--arch/arm/include/asm/vmlinux.lds.h2
-rw-r--r--arch/arm/kernel/entry-armv.S3
-rw-r--r--arch/arm/kernel/entry-common.S3
-rw-r--r--arch/arm/kernel/module.c5
-rw-r--r--arch/arm/kernel/perf_callchain.c3
-rw-r--r--arch/arm/kernel/vmlinux-xip.lds.S4
-rw-r--r--arch/arm/kernel/vmlinux.lds.S6
-rw-r--r--arch/arm/mach-alpine/alpine_cpu_pm.c2
-rw-r--r--arch/arm/mm/proc.c20
-rw-r--r--arch/arm64/Kconfig22
-rw-r--r--arch/arm64/include/asm/cputype.h4
-rw-r--r--arch/arm64/include/asm/jump_label.h1
-rw-r--r--arch/arm64/kernel/Makefile.syscalls2
-rw-r--r--arch/arm64/kernel/cpu_errata.c11
-rw-r--r--arch/arm64/kernel/jump_label.c11
-rw-r--r--arch/loongarch/kernel/Makefile.syscalls3
-rw-r--r--arch/parisc/Kconfig1
-rw-r--r--arch/parisc/include/asm/cache.h11
-rw-r--r--arch/parisc/net/bpf_jit_core.c2
-rw-r--r--arch/powerpc/kernel/nvram_64.c8
-rw-r--r--arch/powerpc/platforms/powernv/opal-kmsg.c4
-rw-r--r--arch/riscv/kernel/Makefile.syscalls2
-rw-r--r--arch/riscv/kernel/cpufeature.c14
-rw-r--r--arch/riscv/kernel/sbi-ipi.c2
-rw-r--r--arch/riscv/mm/fault.c17
-rw-r--r--arch/riscv/mm/init.c15
-rw-r--r--arch/riscv/purgatory/entry.S2
-rw-r--r--arch/s390/kernel/alternative.h0
-rw-r--r--arch/s390/kernel/fpu.c2
-rw-r--r--arch/s390/kernel/vmlinux.lds.S17
-rw-r--r--arch/s390/mm/dump_pagetables.c140
-rw-r--r--arch/s390/mm/init.c9
-rw-r--r--arch/s390/mm/vmem.c13
-rw-r--r--arch/um/drivers/mconsole_user.c2
-rw-r--r--arch/um/kernel/kmsg_dump.c2
-rw-r--r--arch/x86/coco/sev/core.c2
-rw-r--r--arch/x86/entry/syscalls/syscall_64.tbl2
-rw-r--r--arch/x86/events/core.c22
-rw-r--r--arch/x86/events/intel/cstate.c5
-rw-r--r--arch/x86/include/asm/cmdline.h4
-rw-r--r--arch/x86/include/asm/kvm_host.h1
-rw-r--r--arch/x86/kernel/cpu/amd.c2
-rw-r--r--arch/x86/kernel/cpu/aperfmperf.c6
-rw-r--r--arch/x86/kernel/setup.c2
-rw-r--r--arch/x86/kvm/Kconfig4
-rw-r--r--arch/x86/kvm/lapic.c2
-rw-r--r--arch/x86/kvm/mmu/mmu.c7
-rw-r--r--arch/x86/kvm/svm/sev.c17
-rw-r--r--arch/x86/kvm/svm/svm.c1
-rw-r--r--arch/x86/kvm/x86.c12
-rw-r--r--arch/x86/lib/cmdline.c25
-rw-r--r--arch/x86/lib/getuser.S4
-rw-r--r--arch/x86/mm/pti.c8
-rw-r--r--drivers/accel/ivpu/ivpu_fw.c4
-rw-r--r--drivers/bluetooth/Kconfig2
-rw-r--r--drivers/bluetooth/btintel.c3
-rw-r--r--drivers/bluetooth/btmtk.c5
-rw-r--r--drivers/cache/Kconfig1
-rw-r--r--drivers/dma-buf/dma-heap.c27
-rw-r--r--drivers/edac/skx_common.h1
-rw-r--r--drivers/firmware/efi/libstub/Makefile7
-rw-r--r--drivers/gpu/drm/Kconfig3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v12_0.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c2
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c2
-rw-r--r--drivers/gpu/drm/amd/include/mes_v11_api_def.h3
-rw-r--r--drivers/gpu/drm/amd/include/mes_v12_api_def.h3
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c3
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h14
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c86
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c2
-rw-r--r--drivers/gpu/drm/ast/ast_dp.c186
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c5
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h4
-rw-r--r--drivers/gpu/drm/ast/ast_main.c6
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c31
-rw-r--r--drivers/gpu/drm/ast/ast_post.c2
-rw-r--r--drivers/gpu/drm/ast/ast_reg.h22
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c5
-rw-r--r--drivers/gpu/drm/bridge/ite-it6505.c17
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611uxc.c103
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c7
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c45
-rw-r--r--drivers/gpu/drm/ci/arm64.config1
-rw-r--r--drivers/gpu/drm/ci/gitlab-ci.yml7
-rwxr-xr-xdrivers/gpu/drm/ci/igt_runner.sh11
-rw-r--r--drivers/gpu/drm/ci/test.yml123
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt14
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-fails.txt12
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt41
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-skips.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-fails.txt14
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt9
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-fails.txt24
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt8
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-skips.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt25
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt4
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-fails.txt17
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt10
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt32
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt28
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt20
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt19
-rw-r--r--drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt3
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt146
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt18
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt146
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt11
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt3
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt105
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt23
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt23
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt26
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3399-flakes.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt26
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt22
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt57
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt90
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt50
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt10
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt65
-rw-r--r--drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-fails.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt21
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-skips.txt106
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c66
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c2
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c5
-rw-r--r--drivers/gpu/drm/drm_bridge.c9
-rw-r--r--drivers/gpu/drm/drm_bridge_connector.c8
-rw-r--r--drivers/gpu/drm/drm_client.c2
-rw-r--r--drivers/gpu/drm/drm_connector.c83
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h7
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c13
-rw-r--r--drivers/gpu/drm/drm_mode_config.c2
-rw-r--r--drivers/gpu/drm/drm_panel.c18
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c6
-rw-r--r--drivers/gpu/drm/drm_panic.c26
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c2
-rw-r--r--drivers/gpu/drm/drm_vblank.c81
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c2
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.c22
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.h4
-rw-r--r--drivers/gpu/drm/gma500/intel_gmbus.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c4
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c26
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c6
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c33
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.c4
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c2
-rw-r--r--drivers/gpu/drm/loongson/lsdc_ttm.c8
-rw-r--r--drivers/gpu/drm/mgag200/Makefile1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_bmc.c111
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c40
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h58
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200.c5
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200eh.c11
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200eh3.c11
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200er.c22
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200ev.c22
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200ew3.c13
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200se.c22
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200wb.c13
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c183
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_reg.h7
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_vga_bmc.c156
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_kms.c5
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c57
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dac.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.c9
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base507c.c21
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base827c.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base907c.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core507d.c8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec37d.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec57d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crc907d.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crcc37d.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crcc57d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/dac507d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/dac907d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c78
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.h14
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head507d.c24
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head827d.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head907d.c26
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head917d.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc37d.c18
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc57d.c12
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly507e.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly827e.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly907e.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/pior507d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sor507d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sor907d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sorc37d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c24
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0080.h7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/client.h11
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/device.h37
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/driver.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0000.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0002.h39
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0003.h34
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/ioctl.h27
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/object.h24
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/os.h19
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/client.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/layout.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/object.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/os.h19
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/pci.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h29
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c330
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h50
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo0039.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo5039.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo74c1.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo85b5.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo9039.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo90b5.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_boa0b5.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c100
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c387
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h61
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c47
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c46
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_led.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c38
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_nvif.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sched.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.c194
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_uvmm.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c14
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c12
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fence.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvif/client.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvif/device.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvif/driver.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvif/object.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/client.c64
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/ioctl.c91
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/object.c50
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/oproxy.c42
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/uevent.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c479
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c867
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c165
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c243
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c80
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c184
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c157
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c138
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c123
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c175
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h105
-rw-r--r--drivers/gpu/drm/omapdrm/dss/base.c3
-rw-r--r--drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c2
-rw-r--r--drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c325
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c190
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c37
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8394.c153
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9806e.c165
-rw-r--r--drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c317
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36672e.c69
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c26
-rw-r--r--drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c29
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c2
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.c2
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c2
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c25
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c1
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c1
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c1
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c1
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c1
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c1
-rw-r--r--drivers/gpu/drm/stm/Kconfig1
-rw-r--r--drivers/gpu/drm/stm/drv.c7
-rw-r--r--drivers/gpu/drm/stm/ltdc.c107
-rw-r--r--drivers/gpu/drm/stm/lvds.c1
-rw-r--r--drivers/gpu/drm/tegra/drm.c6
-rw-r--r--drivers/gpu/drm/tests/drm_gem_shmem_test.c27
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_bo_test.c6
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_resource_test.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c460
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c151
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c29
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c251
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c12
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c11
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h16
-rw-r--r--drivers/gpu/drm/v3d/v3d_perfmon.c40
-rw-r--r--drivers/gpu/drm/v3d/v3d_performance_counters.h16
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c121
-rw-r--r--drivers/gpu/drm/v3d/v3d_submit.c292
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_submit.c2
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h1
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.c14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmw_surface_cache.h10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c127
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.h15
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h40
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c17
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gem.c62
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c502
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h17
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_prime.c32
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c27
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c33
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c174
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c280
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c40
-rw-r--r--drivers/gpu/drm/xe/Makefile23
-rw-r--r--drivers/gpu/drm/xe/display/intel_fbdev_fb.c6
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_wa.c16
-rw-r--r--drivers/gpu/drm/xe/display/xe_dsb_buffer.c8
-rw-r--r--drivers/gpu/drm/xe/display/xe_fb_pin.c3
-rw-r--r--drivers/gpu/drm/xe/display/xe_plane_initial.c6
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gt_regs.h15
-rw-r--r--drivers/gpu/drm/xe/regs/xe_regs.h12
-rw-r--r--drivers/gpu/drm/xe/regs/xe_sriov_regs.h23
-rw-r--r--drivers/gpu/drm/xe/tests/Makefile6
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo.c45
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo_test.c21
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo_test.h14
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf.c26
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf_test.c20
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf_test.h13
-rw-r--r--drivers/gpu/drm/xe/tests/xe_kunit_helpers.c39
-rw-r--r--drivers/gpu/drm/xe/tests/xe_kunit_helpers.h2
-rw-r--r--drivers/gpu/drm/xe/tests/xe_live_test_mod.c11
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate.c424
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate_test.c20
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate_test.h13
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs.c44
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs_test.c21
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs_test.h14
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci.c30
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.c4
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.h2
-rw-r--r--drivers/gpu/drm/xe/tests/xe_rtp_test.c219
-rw-r--r--drivers/gpu/drm/xe/tests/xe_test.h10
-rw-r--r--drivers/gpu/drm/xe/tests/xe_wa_test.c1
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c11
-rw-r--r--drivers/gpu/drm/xe/xe_bo.h5
-rw-r--r--drivers/gpu/drm/xe/xe_bo_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.c10
-rw-r--r--drivers/gpu/drm/xe/xe_device.c111
-rw-r--r--drivers/gpu/drm/xe/xe_device.h9
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h30
-rw-r--r--drivers/gpu/drm/xe/xe_drm_client.c5
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c33
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.h2
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue_types.h13
-rw-r--r--drivers/gpu/drm/xe/xe_execlist.c3
-rw-r--r--drivers/gpu/drm/xe/xe_gen_wa_oob.c16
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c54
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.c28
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c205
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h12
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.c27
-rw-r--r--drivers/gpu/drm/xe/xe_gt_types.h25
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c11
-rw-r--r--drivers/gpu/drm/xe/xe_guc_id_mgr.c4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c56
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.c28
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.h10
-rw-r--r--drivers/gpu/drm/xe/xe_irq.c2
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.c4
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c528
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.h34
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.c231
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.h1
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c2
-rw-r--r--drivers/gpu/drm/xe/xe_pat.c11
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c7
-rw-r--r--drivers/gpu/drm/xe/xe_pm.c8
-rw-r--r--drivers/gpu/drm/xe/xe_preempt_fence.c12
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c1310
-rw-r--r--drivers/gpu/drm/xe/xe_pt.h14
-rw-r--r--drivers/gpu/drm/xe/xe_pt_types.h48
-rw-r--r--drivers/gpu/drm/xe/xe_query.c4
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.c42
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.h4
-rw-r--r--drivers/gpu/drm/xe/xe_rtp_helpers.h6
-rw-r--r--drivers/gpu/drm/xe/xe_sa.c7
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.c2
-rw-r--r--drivers/gpu/drm/xe/xe_sync.c20
-rw-r--r--drivers/gpu/drm/xe/xe_sync.h1
-rw-r--r--drivers/gpu/drm/xe/xe_trace.h52
-rw-r--r--drivers/gpu/drm/xe/xe_trace_bo.h10
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.c8
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.c3
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c703
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h2
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h55
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c15
-rw-r--r--drivers/gpu/drm/xe/xe_wa.h7
-rw-r--r--drivers/gpu/drm/xe/xe_wa_oob.rules2
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_client.c18
-rw-r--r--drivers/hid/bpf/Kconfig2
-rw-r--r--drivers/hid/bpf/hid_bpf_struct_ops.c5
-rw-r--r--drivers/hid/wacom_wac.c67
-rw-r--r--drivers/hv/hv_common.c4
-rw-r--r--drivers/hwmon/adt7475.c24
-rw-r--r--drivers/input/input-mt.c3
-rw-r--r--drivers/input/touchscreen/cyttsp4_core.c2
-rw-r--r--drivers/irqchip/irq-loongarch-cpu.c6
-rw-r--r--drivers/irqchip/irq-mbigen.c20
-rw-r--r--drivers/irqchip/irq-meson-gpio.c14
-rw-r--r--drivers/irqchip/irq-pic32-evic.c6
-rw-r--r--drivers/irqchip/irq-sun6i-r.c2
-rw-r--r--drivers/media/dvb-frontends/stv0367_priv.h3
-rw-r--r--drivers/media/pci/intel/ipu6/Kconfig3
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c8
-rw-r--r--drivers/mtd/mtdoops.c6
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_devlink.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c14
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c184
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h14
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c33
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c2
-rw-r--r--drivers/net/ethernet/meta/Kconfig2
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c8
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/fjes/fjes_main.c4
-rw-r--r--drivers/net/phy/aquantia/aquantia_main.c29
-rw-r--r--drivers/net/phy/micrel.c34
-rw-r--r--drivers/net/phy/realtek.c7
-rw-r--r--drivers/net/usb/sr9700.c11
-rw-r--r--drivers/net/wan/fsl_qmc_hdlc.c31
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c3
-rw-r--r--drivers/net/wireless/ath/ath12k/wow.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c1
-rw-r--r--drivers/nfc/pn544/i2c.c2
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c4
-rw-r--r--drivers/pci/pci.c15
-rw-r--r--drivers/perf/riscv_pmu_sbi.c2
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c2
-rw-r--r--drivers/platform/x86/sony-laptop.c1
-rw-r--r--drivers/s390/cio/ccwgroup.c1
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c1
-rw-r--r--drivers/scsi/isci/init.c6
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c11
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c20
-rw-r--r--drivers/scsi/sd.c5
-rw-r--r--drivers/scsi/sr_ioctl.c2
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h5
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c29
-rw-r--r--drivers/thermal/thermal_trip.c4
-rw-r--r--drivers/ufs/core/ufshcd-priv.h5
-rw-r--r--drivers/ufs/core/ufshcd.c27
-rw-r--r--drivers/ufs/host/ufs-exynos.c3
-rw-r--r--drivers/vdpa/octeon_ep/octep_vdpa_hw.c2
-rw-r--r--drivers/video/fbdev/core/fbcon.c16
-rw-r--r--drivers/virtio/virtio.c28
-rw-r--r--drivers/virtio/virtio_pci_common.c190
-rw-r--r--drivers/virtio/virtio_pci_common.h16
-rw-r--r--drivers/virtio/virtio_pci_modern.c159
-rw-r--r--fs/btrfs/block-group.c13
-rw-r--r--fs/btrfs/ctree.h1
-rw-r--r--fs/btrfs/direct-io.c38
-rw-r--r--fs/btrfs/extent-tree.c3
-rw-r--r--fs/btrfs/extent_map.c2
-rw-r--r--fs/btrfs/file.c17
-rw-r--r--fs/btrfs/free-space-cache.c4
-rw-r--r--fs/btrfs/inode.c18
-rw-r--r--fs/btrfs/space-info.c5
-rw-r--r--fs/btrfs/space-info.h1
-rw-r--r--fs/btrfs/tests/extent-map-tests.c99
-rw-r--r--fs/btrfs/tree-checker.c49
-rw-r--r--fs/ceph/caps.c35
-rw-r--r--fs/ceph/super.h7
-rw-r--r--fs/file.c1
-rw-r--r--fs/pstore/platform.c10
-rw-r--r--fs/smb/client/cifsfs.h4
-rw-r--r--fs/smb/client/cifsglob.h24
-rw-r--r--fs/smb/client/cifsproto.h2
-rw-r--r--fs/smb/client/inode.c17
-rw-r--r--fs/smb/client/ioctl.c32
-rw-r--r--fs/smb/client/misc.c54
-rw-r--r--fs/smb/client/reparse.c4
-rw-r--r--fs/smb/client/reparse.h19
-rw-r--r--fs/smb/client/smb2inode.c8
-rw-r--r--fs/smb/client/trace.h51
-rw-r--r--fs/xfs/libxfs/xfs_quota_defs.h2
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.c28
-rw-r--r--fs/xfs/scrub/agheader_repair.c2
-rw-r--r--fs/xfs/scrub/parent.c2
-rw-r--r--fs/xfs/scrub/trace.h10
-rw-r--r--fs/xfs/xfs_attr_list.c2
-rw-r--r--fs/xfs/xfs_trace.h10
-rw-r--r--fs/xfs/xfs_xattr.c19
-rw-r--r--include/asm-generic/vmlinux.lds.h11
-rw-r--r--include/drm/display/drm_dp.h4
-rw-r--r--include/drm/display/drm_dp_helper.h3
-rw-r--r--include/drm/drm_connector.h8
-rw-r--r--include/drm/drm_device.h5
-rw-r--r--include/drm/drm_vblank.h37
-rw-r--r--include/drm/gpu_scheduler.h2
-rw-r--r--include/drm/ttm/ttm_bo.h48
-rw-r--r--include/drm/ttm/ttm_resource.h97
-rw-r--r--include/linux/compiler.h9
-rw-r--r--include/linux/cpuhotplug.h2
-rw-r--r--include/linux/dma-heap.h21
-rw-r--r--include/linux/fb.h1
-rw-r--r--include/linux/kmsg_dump.h22
-rw-r--r--include/linux/kvm_host.h9
-rw-r--r--include/linux/minmax.h115
-rw-r--r--include/linux/profile.h1
-rw-r--r--include/linux/virtio.h3
-rw-r--r--include/linux/virtio_config.h4
-rw-r--r--include/linux/virtio_net.h16
-rw-r--r--include/sound/ump_convert.h1
-rw-r--r--include/trace/events/btrfs.h8
-rw-r--r--include/trace/events/mptcp.h2
-rw-r--r--include/uapi/asm-generic/unistd.h5
-rw-r--r--include/uapi/drm/xe_drm.h18
-rw-r--r--include/uapi/linux/virtio_gpu.h1
-rw-r--r--include/ufs/ufshcd.h1
-rw-r--r--include/ufs/ufshci.h1
-rw-r--r--init/Kconfig1
-rw-r--r--io_uring/napi.c2
-rw-r--r--io_uring/poll.c1
-rw-r--r--kernel/jump_label.c4
-rw-r--r--kernel/ksysfs.c7
-rw-r--r--kernel/locking/qspinlock_paravirt.h2
-rw-r--r--kernel/panic.c2
-rw-r--r--kernel/printk/printk.c11
-rw-r--r--kernel/profile.c242
-rw-r--r--kernel/sched/core.c68
-rw-r--r--kernel/sched/cputime.c6
-rw-r--r--kernel/sched/stats.c10
-rw-r--r--kernel/task_work.c6
-rw-r--r--kernel/time/clocksource.c2
-rw-r--r--kernel/time/tick-broadcast.c3
-rw-r--r--kernel/trace/preemptirq_delay_test.c2
-rw-r--r--lib/btree.c1
-rw-r--r--lib/decompress_unlzma.c2
-rw-r--r--lib/vsprintf.c2
-rw-r--r--mm/zsmalloc.c2
-rw-r--r--net/bluetooth/hci_core.c7
-rw-r--r--net/bluetooth/hci_event.c5
-rw-r--r--net/bluetooth/hci_sync.c21
-rw-r--r--net/core/dev.c1
-rw-r--r--net/core/rtnetlink.c2
-rw-r--r--net/ethtool/ioctl.c43
-rw-r--r--net/ethtool/rss.c8
-rw-r--r--net/ipv4/netfilter/iptable_nat.c18
-rw-r--r--net/ipv4/tcp_input.c23
-rw-r--r--net/ipv4/tcp_offload.c3
-rw-r--r--net/ipv4/udp_offload.c4
-rw-r--r--net/ipv6/ndisc.c34
-rw-r--r--net/ipv6/netfilter/ip6table_nat.c14
-rw-r--r--net/iucv/af_iucv.c4
-rw-r--r--net/mac80211/cfg.c7
-rw-r--r--net/mac80211/tx.c5
-rw-r--r--net/mac80211/util.c2
-rw-r--r--net/mptcp/mib.c2
-rw-r--r--net/mptcp/mib.h2
-rw-r--r--net/mptcp/options.c2
-rw-r--r--net/mptcp/pm.c12
-rw-r--r--net/mptcp/pm_netlink.c46
-rw-r--r--net/mptcp/pm_userspace.c18
-rw-r--r--net/mptcp/protocol.c18
-rw-r--r--net/mptcp/protocol.h4
-rw-r--r--net/mptcp/subflow.c26
-rw-r--r--net/sched/act_ct.c4
-rw-r--r--net/smc/af_smc.c7
-rw-r--r--net/wireless/scan.c11
-rw-r--r--net/wireless/sme.c1
-rw-r--r--scripts/syscall.tbl5
-rw-r--r--sound/core/seq/seq_ports.h14
-rw-r--r--sound/core/seq/seq_ump_convert.c132
-rw-r--r--sound/core/ump_convert.c60
-rw-r--r--sound/firewire/amdtp-stream.c38
-rw-r--r--sound/firewire/amdtp-stream.h1
-rw-r--r--sound/pci/hda/hda_controller.h2
-rw-r--r--sound/pci/hda/hda_generic.c63
-rw-r--r--sound/pci/hda/hda_generic.h1
-rw-r--r--sound/pci/hda/hda_intel.c10
-rw-r--r--sound/pci/hda/patch_conexant.c56
-rw-r--r--sound/pci/hda/patch_realtek.c1
-rw-r--r--sound/usb/stream.c4
-rw-r--r--tools/build/feature/Makefile53
-rw-r--r--tools/perf/Documentation/Build.txt28
-rw-r--r--tools/perf/Makefile.config20
-rw-r--r--tools/perf/Makefile.perf27
-rw-r--r--tools/perf/pmu-events/arch/riscv/andes/ax45/firmware.json2
-rw-r--r--tools/perf/pmu-events/arch/riscv/riscv-sbi-firmware.json2
-rw-r--r--tools/perf/pmu-events/arch/riscv/sifive/u74/firmware.json2
-rw-r--r--tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json2
-rw-r--r--tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json2
-rw-r--r--tools/perf/util/callchain.c2
-rw-r--r--tools/testing/selftests/bpf/Makefile2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c4
-rw-r--r--tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c4
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/rss_ctx.py37
-rw-r--r--tools/testing/selftests/hid/hid_bpf.c26
-rw-r--r--tools/testing/selftests/hid/progs/hid.c2
-rw-r--r--tools/testing/selftests/hid/progs/hid_bpf_helpers.h2
-rw-r--r--tools/testing/selftests/kvm/riscv/get-reg-list.c8
-rw-r--r--tools/testing/selftests/mm/mremap_test.c2
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_connect.c8
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh103
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c2
-rw-r--r--virt/kvm/Kconfig4
-rw-r--r--virt/kvm/guest_memfd.c227
-rw-r--r--virt/kvm/kvm_main.c49
736 files changed, 12298 insertions, 10484 deletions
diff --git a/Documentation/accel/qaic/qaic.rst b/Documentation/accel/qaic/qaic.rst
index efb7771273bb..62a8d2b4711d 100644
--- a/Documentation/accel/qaic/qaic.rst
+++ b/Documentation/accel/qaic/qaic.rst
@@ -147,12 +147,6 @@ DRM_IOCTL_QAIC_PERF_STATS_BO
recent execution of a BO. This allows userspace to construct an end to end
timeline of the BO processing for a performance analysis.
-DRM_IOCTL_QAIC_PART_DEV
- This IOCTL allows userspace to request a duplicate "shadow device". This extra
- accelN device is associated with a specific partition of resources on the
- AIC100 device and can be used for limiting a process to some subset of
- resources.
-
DRM_IOCTL_QAIC_DETACH_SLICE_BO
This IOCTL allows userspace to remove the slicing information from a BO that
was originally provided by a call to DRM_IOCTL_QAIC_ATTACH_SLICE_BO. This
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index f1384c7b59c9..09126bb8cc9f 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -4798,11 +4798,9 @@
profile= [KNL] Enable kernel profiling via /proc/profile
Format: [<profiletype>,]<number>
- Param: <profiletype>: "schedule", "sleep", or "kvm"
+ Param: <profiletype>: "schedule" or "kvm"
[defaults to kernel profiling]
Param: "schedule" - profile schedule points.
- Param: "sleep" - profile D-state sleeping (millisecs).
- Requires CONFIG_SCHEDSTATS
Param: "kvm" - profile VM exits.
Param: <number> - step/bucket size as a power of 2 for
statistical time based profiling.
diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst
index bb83c5d8c675..50327c05be8d 100644
--- a/Documentation/arch/arm64/silicon-errata.rst
+++ b/Documentation/arch/arm64/silicon-errata.rst
@@ -122,10 +122,18 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A76 | #1490853 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A76 | #3324349 | ARM64_ERRATUM_3194386 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A77 | #1491015 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A77 | #1508412 | ARM64_ERRATUM_1508412 |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A77 | #3324348 | ARM64_ERRATUM_3194386 |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A78 | #3324344 | ARM64_ERRATUM_3194386 |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A78C | #3324346,3324347| ARM64_ERRATUM_3194386 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2119858 | ARM64_ERRATUM_2119858 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2054223 | ARM64_ERRATUM_2054223 |
@@ -138,8 +146,14 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A720 | #3456091 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A725 | #3456106 | ARM64_ERRATUM_3194386 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X1 | #1502854 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-X1 | #3324344 | ARM64_ERRATUM_3194386 |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-X1C | #3324346 | ARM64_ERRATUM_3194386 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X2 | #2119858 | ARM64_ERRATUM_2119858 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X2 | #2224489 | ARM64_ERRATUM_2224489 |
@@ -160,6 +174,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N1 | #1542419 | ARM64_ERRATUM_1542419 |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Neoverse-N1 | #3324349 | ARM64_ERRATUM_3194386 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N2 | #2139208 | ARM64_ERRATUM_2139208 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N2 | #2067961 | ARM64_ERRATUM_2067961 |
@@ -170,6 +186,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-V1 | #1619801 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Neoverse-V1 | #3324341 | ARM64_ERRATUM_3194386 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-V2 | #3324336 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-V3 | #3312417 | ARM64_ERRATUM_3194386 |
diff --git a/Documentation/devicetree/bindings/ata/rockchip,dwc-ahci.yaml b/Documentation/devicetree/bindings/ata/rockchip,dwc-ahci.yaml
index b5e5767d8698..13eaa8d9a16e 100644
--- a/Documentation/devicetree/bindings/ata/rockchip,dwc-ahci.yaml
+++ b/Documentation/devicetree/bindings/ata/rockchip,dwc-ahci.yaml
@@ -35,6 +35,9 @@ properties:
ports-implemented:
const: 1
+ power-domains:
+ maxItems: 1
+
sata-port@0:
$ref: /schemas/ata/snps,dwc-ahci-common.yaml#/$defs/dwc-ahci-port
diff --git a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml
index 2ad0cd6dd49e..b78f64c9c5f4 100644
--- a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.yaml
@@ -92,12 +92,31 @@ properties:
reference to a valid DPI output or input endpoint node.
port@2:
- $ref: /schemas/graph.yaml#/properties/port
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
description: |
eDP/DP output port. The remote endpoint phandle should be a
reference to a valid eDP panel input endpoint node. This port is
optional, treated as DP panel if not defined
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ toshiba,pre-emphasis:
+ description:
+ Display port output Pre-Emphasis settings for both DP lanes.
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ minItems: 2
+ maxItems: 2
+ items:
+ enum:
+ - 0 # No pre-emphasis
+ - 1 # 3.5dB pre-emphasis
+ - 2 # 6dB pre-emphasis
+
oneOf:
- required:
- port@0
diff --git a/Documentation/devicetree/bindings/display/panel/boe,th101mb31ig002-28a.yaml b/Documentation/devicetree/bindings/display/panel/boe,th101mb31ig002-28a.yaml
index 5eaccce13c21..6a82bd1ec763 100644
--- a/Documentation/devicetree/bindings/display/panel/boe,th101mb31ig002-28a.yaml
+++ b/Documentation/devicetree/bindings/display/panel/boe,th101mb31ig002-28a.yaml
@@ -9,20 +9,20 @@ title: BOE TH101MB31IG002-28A WXGA DSI Display Panel
maintainers:
- Manuel Traut <[email protected]>
-allOf:
- - $ref: panel-common.yaml#
-
properties:
compatible:
enum:
# BOE TH101MB31IG002-28A 10.1" WXGA TFT LCD panel
- boe,th101mb31ig002-28a
+ # The Starry-er88577 is a 10.1" WXGA TFT-LCD panel
+ - starry,er88577
reg:
maxItems: 1
backlight: true
enable-gpios: true
+ reset-gpios: true
power-supply: true
port: true
rotation: true
@@ -33,6 +33,20 @@ required:
- enable-gpios
- power-supply
+allOf:
+ - $ref: panel-common.yaml#
+ - if:
+ properties:
+ compatible:
+ # The Starry-er88577 is a 10.1" WXGA TFT-LCD panel
+ const: starry,er88577
+ then:
+ properties:
+ reset-gpios: false
+ else:
+ required:
+ - reset-gpios
+
additionalProperties: false
examples:
@@ -47,6 +61,7 @@ examples:
reg = <0>;
backlight = <&backlight_lcd0>;
enable-gpios = <&gpio 45 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio 55 GPIO_ACTIVE_LOW>;
rotation = <90>;
power-supply = <&vcc_3v3>;
port {
diff --git a/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
index 644387e4fb6f..75ccabff308b 100644
--- a/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
+++ b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
@@ -15,14 +15,12 @@ description:
such as the HannStar HSD060BHW4 720x1440 TFT LCD panel connected with
a MIPI-DSI video interface.
-allOf:
- - $ref: panel-common.yaml#
-
properties:
compatible:
items:
- enum:
- hannstar,hsd060bhw4
+ - microchip,ac40t08a-mipi-panel
- powkiddy,x55-panel
- const: himax,hx8394
@@ -46,7 +44,6 @@ properties:
required:
- compatible
- reg
- - reset-gpios
- backlight
- port
- vcc-supply
@@ -54,6 +51,18 @@ required:
additionalProperties: false
+allOf:
+ - $ref: panel-common.yaml#
+ - if:
+ not:
+ properties:
+ compatible:
+ enum:
+ - microchip,ac40t08a-mipi-panel
+ then:
+ required:
+ - reset-gpios
+
examples:
- |
#include <dt-bindings/gpio/gpio.h>
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9806e.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9806e.yaml
index cfd7cc9c8725..f80307579485 100644
--- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9806e.yaml
+++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9806e.yaml
@@ -16,6 +16,7 @@ properties:
compatible:
items:
- enum:
+ - densitron,dmt028vghmcmi-1d
- ortustech,com35h3p70ulc
- const: ilitek,ili9806e
diff --git a/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml b/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml
index 3d5bede98cf1..b8783eba3ddc 100644
--- a/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml
+++ b/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml
@@ -18,6 +18,7 @@ properties:
- enum:
- chongzhou,cz101b4001
- kingdisplay,kd101ne3-40ti
+ - melfas,lmfbx101117480
- radxa,display-10hd-ad001
- radxa,display-8hd-ad002
- const: jadard,jd9365da-h3
diff --git a/Documentation/driver-api/thermal/sysfs-api.rst b/Documentation/driver-api/thermal/sysfs-api.rst
index 6c1175c6afba..978198f8a18b 100644
--- a/Documentation/driver-api/thermal/sysfs-api.rst
+++ b/Documentation/driver-api/thermal/sysfs-api.rst
@@ -4,8 +4,6 @@ Generic Thermal Sysfs driver How To
Written by Sujith Thomas <[email protected]>, Zhang Rui <[email protected]>
-Updated: 2 January 2008
-
Copyright (c) 2008 Intel Corporation
@@ -38,23 +36,23 @@ temperature) and throttle appropriate devices.
::
- struct thermal_zone_device
- *thermal_zone_device_register(char *type,
- int trips, int mask, void *devdata,
- struct thermal_zone_device_ops *ops,
- const struct thermal_zone_params *tzp,
- int passive_delay, int polling_delay))
+ struct thermal_zone_device *
+ thermal_zone_device_register_with_trips(const char *type,
+ const struct thermal_trip *trips,
+ int num_trips, void *devdata,
+ const struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp,
+ unsigned int passive_delay,
+ unsigned int polling_delay)
- This interface function adds a new thermal zone device (sensor) to
+ This interface function adds a new thermal zone device (sensor) to the
/sys/class/thermal folder as `thermal_zone[0-*]`. It tries to bind all the
- thermal cooling devices registered at the same time.
+ thermal cooling devices registered to it at the same time.
type:
the thermal zone type.
trips:
- the total number of trip points this thermal zone supports.
- mask:
- Bit string: If 'n'th bit is set, then trip point 'n' is writable.
+ the table of trip points for this thermal zone.
devdata:
device private data
ops:
@@ -67,32 +65,29 @@ temperature) and throttle appropriate devices.
.get_temp:
get the current temperature of the thermal zone.
.set_trips:
- set the trip points window. Whenever the current temperature
- is updated, the trip points immediately below and above the
- current temperature are found.
- .get_mode:
- get the current mode (enabled/disabled) of the thermal zone.
-
- - "enabled" means the kernel thermal management is
- enabled.
- - "disabled" will prevent kernel thermal driver action
- upon trip points so that user applications can take
- charge of thermal management.
- .set_mode:
- set the mode (enabled/disabled) of the thermal zone.
- .get_trip_type:
- get the type of certain trip point.
- .get_trip_temp:
- get the temperature above which the certain trip point
- will be fired.
+ set the trip points window. Whenever the current temperature
+ is updated, the trip points immediately below and above the
+ current temperature are found.
+ .change_mode:
+ change the mode (enabled/disabled) of the thermal zone.
+ .set_trip_temp:
+ set the temperature of a given trip point.
+ .get_crit_temp:
+ get the critical temperature for this thermal zone.
.set_emul_temp:
- set the emulation temperature which helps in debugging
- different threshold temperature points.
+ set the emulation temperature which helps in debugging
+ different threshold temperature points.
+ .get_trend:
+ get the trend of most recent zone temperature changes.
+ .hot:
+ hot trip point crossing handler.
+ .critical:
+ critical trip point crossing handler.
tzp:
thermal zone platform parameters.
passive_delay:
- number of milliseconds to wait between polls when
- performing passive cooling.
+ number of milliseconds to wait between polls when performing passive
+ cooling.
polling_delay:
number of milliseconds to wait between polls when checking
whether trip points have been crossed (0 for interrupt driven systems).
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index 2ea6ffc9b22b..96c453980ab6 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -475,25 +475,22 @@ Remove disable/unprepare in remove/shutdown in panel-simple and panel-edp
As of commit d2aacaf07395 ("drm/panel: Check for already prepared/enabled in
drm_panel"), we have a check in the drm_panel core to make sure nobody
double-calls prepare/enable/disable/unprepare. Eventually that should probably
-be turned into a WARN_ON() or somehow made louder, but right now we actually
-expect it to trigger and so we don't want it to be too loud.
-
-Specifically, that warning will trigger for panel-edp and panel-simple at
-shutdown time because those panels hardcode a call to drm_panel_disable()
-and drm_panel_unprepare() at shutdown and remove time that they call regardless
-of panel state. On systems with a properly coded DRM modeset driver that
-calls drm_atomic_helper_shutdown() this is pretty much guaranteed to cause
-the warning to fire.
-
-Unfortunately we can't safely remove the calls in panel-edp and panel-simple
-until we're sure that all DRM modeset drivers that are used with those panels
-properly call drm_atomic_helper_shutdown(). This TODO item is to validate
-that all DRM modeset drivers used with panel-edp and panel-simple properly
-call drm_atomic_helper_shutdown() and then remove the calls to
-disable/unprepare from those panels. Alternatively, this TODO item could be
-removed by convincing stakeholders that those calls are fine and downgrading
-the error message in drm_panel_disable() / drm_panel_unprepare() to a
-debug-level message.
+be turned into a WARN_ON() or somehow made louder.
+
+At the moment, we expect that we may still encounter the warnings in the
+drm_panel core when using panel-simple and panel-edp. Since those panel
+drivers are used with a lot of different DRM modeset drivers they still
+make an extra effort to disable/unprepare the panel themselves at shutdown
+time. Specifically we could still encounter those warnings if the panel
+driver gets shutdown() _before_ the DRM modeset driver and the DRM modeset
+driver properly calls drm_atomic_helper_shutdown() in its own shutdown()
+callback. Warnings could be avoided in such a case by using something like
+device links to ensure that the panel gets shutdown() after the DRM modeset
+driver.
+
+Once all DRM modeset drivers are known to shutdown properly, the extra
+calls to disable/unprepare in remove/shutdown in panel-simple and panel-edp
+should be removed and this TODO item marked complete.
Contact: Douglas Anderson <[email protected]>
diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml
index 495e35fcfb21..ea21fe135b97 100644
--- a/Documentation/netlink/specs/ethtool.yaml
+++ b/Documentation/netlink/specs/ethtool.yaml
@@ -1753,6 +1753,7 @@ operations:
request:
attributes:
- header
+ - context
reply:
attributes:
- header
@@ -1761,7 +1762,6 @@ operations:
- indir
- hkey
- input_xfrm
- dump: *rss-get-op
-
name: plca-get-cfg
doc: Get PLCA params.
diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index 3ab423b80e91..d5f246aceb9f 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -1875,6 +1875,7 @@ Kernel response contents:
===================================== ====== ==========================
``ETHTOOL_A_RSS_HEADER`` nested reply header
+ ``ETHTOOL_A_RSS_CONTEXT`` u32 context number
``ETHTOOL_A_RSS_HFUNC`` u32 RSS hash func
``ETHTOOL_A_RSS_INDIR`` binary Indir table bytes
``ETHTOOL_A_RSS_HKEY`` binary Hash key bytes
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-yuv-luma.rst b/Documentation/userspace-api/media/v4l/pixfmt-yuv-luma.rst
index f02e6cf3516a..74df19be91f6 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-yuv-luma.rst
+++ b/Documentation/userspace-api/media/v4l/pixfmt-yuv-luma.rst
@@ -21,9 +21,9 @@ are often referred to as greyscale formats.
.. raw:: latex
- \scriptsize
+ \tiny
-.. tabularcolumns:: |p{3.6cm}|p{3.0cm}|p{1.3cm}|p{2.6cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|
+.. tabularcolumns:: |p{3.6cm}|p{2.4cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|
.. flat-table:: Luma-Only Image Formats
:header-rows: 1
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index fe722c5dada9..33938468d62d 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -6368,7 +6368,7 @@ a single guest_memfd file, but the bound ranges must not overlap).
See KVM_SET_USER_MEMORY_REGION2 for additional details.
4.143 KVM_PRE_FAULT_MEMORY
-------------------------
+---------------------------
:Capability: KVM_CAP_PRE_FAULT_MEMORY
:Architectures: none
@@ -6405,6 +6405,12 @@ for the current vCPU state. KVM maps memory as if the vCPU generated a
stage-2 read page fault, e.g. faults in memory as needed, but doesn't break
CoW. However, KVM does not mark any newly created stage-2 PTE as Accessed.
+In the case of confidential VM types where there is an initial set up of
+private guest memory before the guest is 'finalized'/measured, this ioctl
+should only be issued after completing all the necessary setup to put the
+guest into a 'finalized' state so that the above semantics can be reliably
+ensured.
+
In some cases, multiple vCPUs might share the page tables. In this
case, the ioctl can be called in parallel.
diff --git a/MAINTAINERS b/MAINTAINERS
index 42decde38320..a1537a149e9a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1013,6 +1013,13 @@ S: Supported
T: git https://gitlab.freedesktop.org/agd5f/linux.git
F: drivers/gpu/drm/amd/display/
+AMD DISPLAY CORE - DML
+M: Chaitanya Dhere <[email protected]>
+M: Jun Lei <[email protected]>
+S: Supported
+F: drivers/gpu/drm/amd/display/dc/dml/
+F: drivers/gpu/drm/amd/display/dc/dml2/
+
AMD FAM15H PROCESSOR POWER MONITORING DRIVER
M: Huang Rui <[email protected]>
@@ -1754,6 +1761,7 @@ L: [email protected]
S: Supported
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/gpu/panfrost.rst
+F: drivers/gpu/drm/ci/xfails/panfrost*
F: drivers/gpu/drm/panfrost/
F: include/uapi/drm/panfrost_drm.h
@@ -6660,6 +6668,7 @@ F: drivers/dma-buf/dma-heap.c
F: drivers/dma-buf/heaps/*
F: include/linux/dma-heap.h
F: include/uapi/linux/dma-heap.h
+F: tools/testing/selftests/dmabuf-heaps/
DMC FREQUENCY DRIVER FOR SAMSUNG EXYNOS5422
M: Lukasz Luba <[email protected]>
@@ -10963,6 +10972,7 @@ T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml
F: Documentation/devicetree/bindings/gpu/img,powervr-sgx.yaml
F: Documentation/gpu/imagination/
+F: drivers/gpu/drm/ci/xfails/powervr*
F: drivers/gpu/drm/imagination/
F: include/uapi/drm/pvr_drm.h
@@ -15936,6 +15946,7 @@ F: include/linux/in.h
F: include/linux/indirect_call_wrapper.h
F: include/linux/net.h
F: include/linux/netdevice.h
+F: include/linux/skbuff.h
F: include/net/
F: include/uapi/linux/in.h
F: include/uapi/linux/net.h
@@ -18556,7 +18567,7 @@ F: drivers/usb/misc/qcom_eud.c
QCOM IPA DRIVER
M: Alex Elder <[email protected]>
-S: Supported
+S: Maintained
F: drivers/net/ipa/
QEMU MACHINE EMULATOR AND VIRTUALIZER SUPPORT
diff --git a/Makefile b/Makefile
index 8ad55d6e7b60..44c02a6f60a1 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 11
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
NAME = Baby Opossum Posse
# *DOCUMENTATION*
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index 2bb8cbeedf91..b191d87f89c4 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -534,8 +534,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
#define ioread16be(p) swab16(ioread16(p))
#define ioread32be(p) swab32(ioread32(p))
+#define ioread64be(p) swab64(ioread64(p))
#define iowrite16be(v,p) iowrite16(swab16(v), (p))
#define iowrite32be(v,p) iowrite32(swab32(v), (p))
+#define iowrite64be(v,p) iowrite64(swab64(v), (p))
#define inb_p inb
#define inw_p inw
@@ -634,8 +636,6 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
*/
#define ioread64 ioread64
#define iowrite64 iowrite64
-#define ioread64be ioread64be
-#define iowrite64be iowrite64be
#define ioread8_rep ioread8_rep
#define ioread16_rep ioread16_rep
#define ioread32_rep ioread32_rep
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 954a1916a500..54b2bb817a7f 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -87,6 +87,7 @@ config ARM
select HAVE_ARCH_PFN_VALID
select HAVE_ARCH_SECCOMP
select HAVE_ARCH_SECCOMP_FILTER if AEABI && !OABI_COMPAT
+ select HAVE_ARCH_STACKLEAK
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARM_LPAE
@@ -116,6 +117,7 @@ config ARM
select HAVE_KERNEL_XZ
select HAVE_KPROBES if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !CPU_V7M
select HAVE_KRETPROBES if HAVE_KPROBES
+ select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_OPTPROBES if !THUMB2_KERNEL
@@ -736,7 +738,7 @@ config ARM_ERRATA_764319
bool "ARM errata: Read to DBGPRSR and DBGOSLSR may generate Undefined instruction"
depends on CPU_V7
help
- This option enables the workaround for the 764319 Cortex A-9 erratum.
+ This option enables the workaround for the 764319 Cortex-A9 erratum.
CP14 read accesses to the DBGPRSR and DBGOSLSR registers generate an
unexpected Undefined Instruction exception when the DBGSWENABLE
external pin is set to 0, even when the CP14 accesses are performed
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 6bca03c0c7f0..945b5975fce2 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -9,6 +9,7 @@ OBJS =
HEAD = head.o
OBJS += misc.o decompress.o
+CFLAGS_decompress.o += $(DISABLE_STACKLEAK_PLUGIN)
ifeq ($(CONFIG_DEBUG_UNCOMPRESS),y)
OBJS += debug.o
AFLAGS_head.o += -DDEBUG
diff --git a/arch/arm/boot/compressed/vmlinux.lds.S b/arch/arm/boot/compressed/vmlinux.lds.S
index 3fcb3e62dc56..d411abd4310e 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.S
+++ b/arch/arm/boot/compressed/vmlinux.lds.S
@@ -125,7 +125,7 @@ SECTIONS
. = BSS_START;
__bss_start = .;
- .bss : { *(.bss) }
+ .bss : { *(.bss .bss.*) }
_end = .;
. = ALIGN(8); /* the stack must be 64-bit aligned */
diff --git a/arch/arm/boot/dts/arm/versatile-ab.dts b/arch/arm/boot/dts/arm/versatile-ab.dts
index 6fe6b49f5d8e..635ab9268899 100644
--- a/arch/arm/boot/dts/arm/versatile-ab.dts
+++ b/arch/arm/boot/dts/arm/versatile-ab.dts
@@ -157,7 +157,7 @@
clocks = <&xtal24mhz>;
};
- pclk: clock-24000000 {
+ pclk: clock-pclk {
#clock-cells = <0>;
compatible = "fixed-factor-clock";
clock-div = <1>;
diff --git a/arch/arm/include/asm/stacktrace.h b/arch/arm/include/asm/stacktrace.h
index 360f0d2406bf..f80a85b091d6 100644
--- a/arch/arm/include/asm/stacktrace.h
+++ b/arch/arm/include/asm/stacktrace.h
@@ -26,6 +26,13 @@ struct stackframe {
#endif
};
+static inline bool on_thread_stack(void)
+{
+ unsigned long delta = current_stack_pointer ^ (unsigned long)current->stack;
+
+ return delta < THREAD_SIZE;
+}
+
static __always_inline
void arm_get_current_stackframe(struct pt_regs *regs, struct stackframe *frame)
{
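
The on_thread_stack() helper added above relies on an XOR containment check: assuming the task stack base is THREAD_SIZE-aligned (as kernel thread stacks are), an address lies on that stack iff its XOR with the base is below THREAD_SIZE. A standalone illustration, with an 8 KiB THREAD_SIZE picked purely for the example::

  #include <stdio.h>

  #define THREAD_SIZE 8192UL      /* illustrative power-of-two stack size */

  static int same_stack(unsigned long sp, unsigned long stack_base)
  {
          /* valid only because stack_base is THREAD_SIZE-aligned */
          return (sp ^ stack_base) < THREAD_SIZE;
  }

  int main(void)
  {
          unsigned long base = 0x80004000UL;      /* THREAD_SIZE-aligned */

          printf("%d\n", same_stack(base + 0x1f00, base));  /* 1: on the stack */
          printf("%d\n", same_stack(base + 0x2f00, base));  /* 0: past the stack */
          return 0;
  }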
diff --git a/arch/arm/include/asm/vmlinux.lds.h b/arch/arm/include/asm/vmlinux.lds.h
index 4c8632d5c432..d60f6e83a9f7 100644
--- a/arch/arm/include/asm/vmlinux.lds.h
+++ b/arch/arm/include/asm/vmlinux.lds.h
@@ -42,7 +42,7 @@
#define PROC_INFO \
. = ALIGN(4); \
__proc_info_begin = .; \
- *(.proc.info.init) \
+ KEEP(*(.proc.info.init)) \
__proc_info_end = .;
#define IDMAP_TEXT \
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 6150a716828c..f01d23a220e6 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -1065,6 +1065,7 @@ vector_addrexcptn:
.globl vector_fiq
.section .vectors, "ax", %progbits
+ .reloc .text, R_ARM_NONE, .
W(b) vector_rst
W(b) vector_und
ARM( .reloc ., R_ARM_LDR_PC_G0, .L__vector_swi )
@@ -1078,6 +1079,7 @@ THUMB( .reloc ., R_ARM_THM_PC12, .L__vector_swi )
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
.section .vectors.bhb.loop8, "ax", %progbits
+ .reloc .text, R_ARM_NONE, .
W(b) vector_rst
W(b) vector_bhb_loop8_und
ARM( .reloc ., R_ARM_LDR_PC_G0, .L__vector_bhb_loop8_swi )
@@ -1090,6 +1092,7 @@ THUMB( .reloc ., R_ARM_THM_PC12, .L__vector_bhb_loop8_swi )
W(b) vector_bhb_loop8_fiq
.section .vectors.bhb.bpiall, "ax", %progbits
+ .reloc .text, R_ARM_NONE, .
W(b) vector_rst
W(b) vector_bhb_bpiall_und
ARM( .reloc ., R_ARM_LDR_PC_G0, .L__vector_bhb_bpiall_swi )
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 5c31e9de7a60..f379c852dcb7 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -119,6 +119,9 @@ no_work_pending:
ct_user_enter save = 0
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+ bl stackleak_erase_on_task_stack
+#endif
restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 677f218f7e84..da488d92e7a0 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -395,11 +395,6 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
return 0;
}
-struct mod_unwind_map {
- const Elf_Shdr *unw_sec;
- const Elf_Shdr *txt_sec;
-};
-
static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
const Elf_Shdr *sechdrs, const char *name)
{
diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c
index 7147edbe56c6..1d230ac9d0eb 100644
--- a/arch/arm/kernel/perf_callchain.c
+++ b/arch/arm/kernel/perf_callchain.c
@@ -85,8 +85,7 @@ static bool
callchain_trace(void *data, unsigned long pc)
{
struct perf_callchain_entry_ctx *entry = data;
- perf_callchain_store(entry, pc);
- return true;
+ return perf_callchain_store(entry, pc) == 0;
}
void
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index c16d196b5aad..5eddb75a7174 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -63,7 +63,7 @@ SECTIONS
. = ALIGN(4);
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
- ARM_MMU_KEEP(*(__ex_table))
+ ARM_MMU_KEEP(KEEP(*(__ex_table)))
__stop___ex_table = .;
}
@@ -83,7 +83,7 @@ SECTIONS
}
.init.arch.info : {
__arch_info_begin = .;
- *(.arch.info.init)
+ KEEP(*(.arch.info.init))
__arch_info_end = .;
}
.init.tagtable : {
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index bd9127c4b451..de373c6c2ae8 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -74,7 +74,7 @@ SECTIONS
. = ALIGN(4);
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
- ARM_MMU_KEEP(*(__ex_table))
+ ARM_MMU_KEEP(KEEP(*(__ex_table)))
__stop___ex_table = .;
}
@@ -99,7 +99,7 @@ SECTIONS
}
.init.arch.info : {
__arch_info_begin = .;
- *(.arch.info.init)
+ KEEP(*(.arch.info.init))
__arch_info_end = .;
}
.init.tagtable : {
@@ -116,7 +116,7 @@ SECTIONS
#endif
.init.pv_table : {
__pv_table_begin = .;
- *(.pv_table)
+ KEEP(*(.pv_table))
__pv_table_end = .;
}
diff --git a/arch/arm/mach-alpine/alpine_cpu_pm.c b/arch/arm/mach-alpine/alpine_cpu_pm.c
index 13ae8412e9ce..b48da6f12b6c 100644
--- a/arch/arm/mach-alpine/alpine_cpu_pm.c
+++ b/arch/arm/mach-alpine/alpine_cpu_pm.c
@@ -29,7 +29,7 @@ int alpine_cpu_wakeup(unsigned int phys_cpu, uint32_t phys_resume_addr)
/*
* Set CPU resume address -
* secure firmware running on boot will jump to this address
- * after setting proper CPU mode, and initialiing e.g. secure
+ * after setting proper CPU mode, and initializing e.g. secure
* regs (the same mode all CPUs are booted to - usually HYP)
*/
writel(phys_resume_addr,
diff --git a/arch/arm/mm/proc.c b/arch/arm/mm/proc.c
index bdbbf65d1b36..2027845efefb 100644
--- a/arch/arm/mm/proc.c
+++ b/arch/arm/mm/proc.c
@@ -17,7 +17,7 @@ void cpu_arm7tdmi_proc_init(void);
__ADDRESSABLE(cpu_arm7tdmi_proc_init);
void cpu_arm7tdmi_proc_fin(void);
__ADDRESSABLE(cpu_arm7tdmi_proc_fin);
-void cpu_arm7tdmi_reset(void);
+void cpu_arm7tdmi_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm7tdmi_reset);
int cpu_arm7tdmi_do_idle(void);
__ADDRESSABLE(cpu_arm7tdmi_do_idle);
@@ -32,7 +32,7 @@ void cpu_arm720_proc_init(void);
__ADDRESSABLE(cpu_arm720_proc_init);
void cpu_arm720_proc_fin(void);
__ADDRESSABLE(cpu_arm720_proc_fin);
-void cpu_arm720_reset(void);
+void cpu_arm720_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm720_reset);
int cpu_arm720_do_idle(void);
__ADDRESSABLE(cpu_arm720_do_idle);
@@ -49,7 +49,7 @@ void cpu_arm740_proc_init(void);
__ADDRESSABLE(cpu_arm740_proc_init);
void cpu_arm740_proc_fin(void);
__ADDRESSABLE(cpu_arm740_proc_fin);
-void cpu_arm740_reset(void);
+void cpu_arm740_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm740_reset);
int cpu_arm740_do_idle(void);
__ADDRESSABLE(cpu_arm740_do_idle);
@@ -64,7 +64,7 @@ void cpu_arm9tdmi_proc_init(void);
__ADDRESSABLE(cpu_arm9tdmi_proc_init);
void cpu_arm9tdmi_proc_fin(void);
__ADDRESSABLE(cpu_arm9tdmi_proc_fin);
-void cpu_arm9tdmi_reset(void);
+void cpu_arm9tdmi_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm9tdmi_reset);
int cpu_arm9tdmi_do_idle(void);
__ADDRESSABLE(cpu_arm9tdmi_do_idle);
@@ -79,7 +79,7 @@ void cpu_arm920_proc_init(void);
__ADDRESSABLE(cpu_arm920_proc_init);
void cpu_arm920_proc_fin(void);
__ADDRESSABLE(cpu_arm920_proc_fin);
-void cpu_arm920_reset(void);
+void cpu_arm920_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm920_reset);
int cpu_arm920_do_idle(void);
__ADDRESSABLE(cpu_arm920_do_idle);
@@ -102,7 +102,7 @@ void cpu_arm922_proc_init(void);
__ADDRESSABLE(cpu_arm922_proc_init);
void cpu_arm922_proc_fin(void);
__ADDRESSABLE(cpu_arm922_proc_fin);
-void cpu_arm922_reset(void);
+void cpu_arm922_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm922_reset);
int cpu_arm922_do_idle(void);
__ADDRESSABLE(cpu_arm922_do_idle);
@@ -119,7 +119,7 @@ void cpu_arm925_proc_init(void);
__ADDRESSABLE(cpu_arm925_proc_init);
void cpu_arm925_proc_fin(void);
__ADDRESSABLE(cpu_arm925_proc_fin);
-void cpu_arm925_reset(void);
+void cpu_arm925_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm925_reset);
int cpu_arm925_do_idle(void);
__ADDRESSABLE(cpu_arm925_do_idle);
@@ -159,7 +159,7 @@ void cpu_arm940_proc_init(void);
__ADDRESSABLE(cpu_arm940_proc_init);
void cpu_arm940_proc_fin(void);
__ADDRESSABLE(cpu_arm940_proc_fin);
-void cpu_arm940_reset(void);
+void cpu_arm940_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm940_reset);
int cpu_arm940_do_idle(void);
__ADDRESSABLE(cpu_arm940_do_idle);
@@ -174,7 +174,7 @@ void cpu_arm946_proc_init(void);
__ADDRESSABLE(cpu_arm946_proc_init);
void cpu_arm946_proc_fin(void);
__ADDRESSABLE(cpu_arm946_proc_fin);
-void cpu_arm946_reset(void);
+void cpu_arm946_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm946_reset);
int cpu_arm946_do_idle(void);
__ADDRESSABLE(cpu_arm946_do_idle);
@@ -429,7 +429,7 @@ void cpu_v7_proc_init(void);
__ADDRESSABLE(cpu_v7_proc_init);
void cpu_v7_proc_fin(void);
__ADDRESSABLE(cpu_v7_proc_fin);
-void cpu_v7_reset(void);
+void cpu_v7_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_v7_reset);
int cpu_v7_do_idle(void);
__ADDRESSABLE(cpu_v7_do_idle);
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b3fc891f1544..a2f8ff354ca6 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1069,18 +1069,28 @@ config ARM64_ERRATUM_3117295
If unsure, say Y.
config ARM64_ERRATUM_3194386
- bool "Cortex-{A720,X4,X925}/Neoverse-V3: workaround for MSR SSBS not self-synchronizing"
+ bool "Cortex-*/Neoverse-*: workaround for MSR SSBS not self-synchronizing"
default y
help
This option adds the workaround for the following errata:
+ * ARM Cortex-A76 erratum 3324349
+ * ARM Cortex-A77 erratum 3324348
+ * ARM Cortex-A78 erratum 3324344
+ * ARM Cortex-A78C erratum 3324346
+ * ARM Cortex-A78C erratum 3324347
* ARM Cortex-A710 erratum 3324338
* ARM Cortex-A720 erratum 3456091
+ * ARM Cortex-A725 erratum 3456106
+ * ARM Cortex-X1 erratum 3324344
+ * ARM Cortex-X1C erratum 3324346
* ARM Cortex-X2 erratum 3324338
* ARM Cortex-X3 erratum 3324335
* ARM Cortex-X4 erratum 3194386
* ARM Cortex-X925 erratum 3324334
+ * ARM Neoverse-N1 erratum 3324349
* ARM Neoverse N2 erratum 3324339
+ * ARM Neoverse-V1 erratum 3324341
* ARM Neoverse V2 erratum 3324336
* ARM Neoverse-V3 erratum 3312417
@@ -1088,11 +1098,11 @@ config ARM64_ERRATUM_3194386
subsequent speculative instructions, which may permit unexpected
speculative store bypassing.
- Work around this problem by placing a speculation barrier after
- kernel changes to SSBS. The presence of the SSBS special-purpose
- register is hidden from hwcaps and EL0 reads of ID_AA64PFR1_EL1, such
- that userspace will use the PR_SPEC_STORE_BYPASS prctl to change
- SSBS.
+ Work around this problem by placing a Speculation Barrier (SB) or
+ Instruction Synchronization Barrier (ISB) after kernel changes to
+ SSBS. The presence of the SSBS special-purpose register is hidden
+ from hwcaps and EL0 reads of ID_AA64PFR1_EL1, such that userspace
+ will use the PR_SPEC_STORE_BYPASS prctl to change SSBS.
If unsure, say Y.
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 1cb0704c6163..5fd7caea4419 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -86,12 +86,14 @@
#define ARM_CPU_PART_CORTEX_X2 0xD48
#define ARM_CPU_PART_NEOVERSE_N2 0xD49
#define ARM_CPU_PART_CORTEX_A78C 0xD4B
+#define ARM_CPU_PART_CORTEX_X1C 0xD4C
#define ARM_CPU_PART_CORTEX_X3 0xD4E
#define ARM_CPU_PART_NEOVERSE_V2 0xD4F
#define ARM_CPU_PART_CORTEX_A720 0xD81
#define ARM_CPU_PART_CORTEX_X4 0xD82
#define ARM_CPU_PART_NEOVERSE_V3 0xD84
#define ARM_CPU_PART_CORTEX_X925 0xD85
+#define ARM_CPU_PART_CORTEX_A725 0xD87
#define APM_CPU_PART_XGENE 0x000
#define APM_CPU_VAR_POTENZA 0x00
@@ -165,12 +167,14 @@
#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
+#define MIDR_CORTEX_X1C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C)
#define MIDR_CORTEX_X3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X3)
#define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
#define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720)
#define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
#define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
+#define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
index 4e753908b801..a0a5bbae7229 100644
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -13,6 +13,7 @@
#include <linux/types.h>
#include <asm/insn.h>
+#define HAVE_JUMP_LABEL_BATCH
#define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE
#define JUMP_TABLE_ENTRY(key, label) \
diff --git a/arch/arm64/kernel/Makefile.syscalls b/arch/arm64/kernel/Makefile.syscalls
index 3cfafd003b2d..0542a718871a 100644
--- a/arch/arm64/kernel/Makefile.syscalls
+++ b/arch/arm64/kernel/Makefile.syscalls
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
syscall_abis_32 +=
-syscall_abis_64 += renameat newstat rlimit memfd_secret
+syscall_abis_64 += renameat rlimit memfd_secret
syscalltbl = arch/arm64/tools/syscall_%.tbl
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 617424b73f8c..f6b6b4507357 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -434,15 +434,24 @@ static const struct midr_range erratum_spec_unpriv_load_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_3194386
static const struct midr_range erratum_spec_ssbs_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
- MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
{}
};
#endif
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
index faf88ec9c48e..f63ea915d6ad 100644
--- a/arch/arm64/kernel/jump_label.c
+++ b/arch/arm64/kernel/jump_label.c
@@ -7,11 +7,12 @@
*/
#include <linux/kernel.h>
#include <linux/jump_label.h>
+#include <linux/smp.h>
#include <asm/insn.h>
#include <asm/patching.h>
-void arch_jump_label_transform(struct jump_entry *entry,
- enum jump_label_type type)
+bool arch_jump_label_transform_queue(struct jump_entry *entry,
+ enum jump_label_type type)
{
void *addr = (void *)jump_entry_code(entry);
u32 insn;
@@ -25,4 +26,10 @@ void arch_jump_label_transform(struct jump_entry *entry,
}
aarch64_insn_patch_text_nosync(addr, insn);
+ return true;
+}
+
+void arch_jump_label_transform_apply(void)
+{
+ kick_all_cpus_sync();
}
diff --git a/arch/loongarch/kernel/Makefile.syscalls b/arch/loongarch/kernel/Makefile.syscalls
index 523bb411a3bc..ab7d9baa2915 100644
--- a/arch/loongarch/kernel/Makefile.syscalls
+++ b/arch/loongarch/kernel/Makefile.syscalls
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-syscall_abis_64 += newstat
+# No special ABIs on loongarch so far
+syscall_abis_64 +=
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 5d650e02cbf4..b0a2ac3ba916 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -20,6 +20,7 @@ config PARISC
select ARCH_SUPPORTS_HUGETLBFS if PA20
select ARCH_SUPPORTS_MEMORY_FAILURE
select ARCH_STACKWALK
+ select ARCH_HAS_CACHE_LINE_SIZE
select ARCH_HAS_DEBUG_VM_PGTABLE
select HAVE_RELIABLE_STACKTRACE
select DMA_OPS
diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
index 2a60d7a72f1f..a3f0f100f219 100644
--- a/arch/parisc/include/asm/cache.h
+++ b/arch/parisc/include/asm/cache.h
@@ -20,7 +20,16 @@
#define SMP_CACHE_BYTES L1_CACHE_BYTES
-#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+#ifdef CONFIG_PA20
+#define ARCH_DMA_MINALIGN 128
+#else
+#define ARCH_DMA_MINALIGN 32
+#endif
+#define ARCH_KMALLOC_MINALIGN 16 /* ldcw requires 16-byte alignment */
+
+#define arch_slab_minalign() ((unsigned)dcache_stride)
+#define cache_line_size() dcache_stride
+#define dma_get_cache_alignment cache_line_size
#define __read_mostly __section(".data..read_mostly")
diff --git a/arch/parisc/net/bpf_jit_core.c b/arch/parisc/net/bpf_jit_core.c
index 979f45d4d1fb..06cbcd6fe87b 100644
--- a/arch/parisc/net/bpf_jit_core.c
+++ b/arch/parisc/net/bpf_jit_core.c
@@ -114,7 +114,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
jit_data->header =
bpf_jit_binary_alloc(prog_size + extable_size,
&jit_data->image,
- sizeof(u32),
+ sizeof(long),
bpf_fill_ill_insns);
if (!jit_data->header) {
prog = orig_prog;
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index e385d3164648..f9c6568a9137 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -73,7 +73,7 @@ static const char *nvram_os_partitions[] = {
};
static void oops_to_nvram(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason);
+ struct kmsg_dump_detail *detail);
static struct kmsg_dumper nvram_kmsg_dumper = {
.dump = oops_to_nvram
@@ -643,7 +643,7 @@ void __init nvram_init_oops_partition(int rtas_partition_exists)
* partition. If that's too much, go back and capture uncompressed text.
*/
static void oops_to_nvram(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason)
+ struct kmsg_dump_detail *detail)
{
struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
static unsigned int oops_count = 0;
@@ -655,7 +655,7 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
unsigned int err_type = ERR_TYPE_KERNEL_PANIC_GZ;
int rc = -1;
- switch (reason) {
+ switch (detail->reason) {
case KMSG_DUMP_SHUTDOWN:
/* These are almost always orderly shutdowns. */
return;
@@ -671,7 +671,7 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
break;
default:
pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n",
- __func__, (int) reason);
+ __func__, (int) detail->reason);
return;
}
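
The same kmsg_dumper signature change recurs in the opal-kmsg and UML dumpers below; written against the new callback type, a minimal dumper (my_dump/my_dumper are illustrative names, not part of this series) might look like::

  #include <linux/kmsg_dump.h>
  #include <linux/printk.h>

  /* The reason code now arrives wrapped in struct kmsg_dump_detail
   * instead of being passed as a bare enum. */
  static void my_dump(struct kmsg_dumper *dumper,
                      struct kmsg_dump_detail *detail)
  {
          if (detail->reason != KMSG_DUMP_PANIC)
                  return;

          pr_info("panic kmsg dump requested\n");
  }

  static struct kmsg_dumper my_dumper = {
          .dump = my_dump,
  };

  /* hooked up at init time with kmsg_dump_register(&my_dumper) */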
diff --git a/arch/powerpc/platforms/powernv/opal-kmsg.c b/arch/powerpc/platforms/powernv/opal-kmsg.c
index 6c3bc4b4da98..bb4218fa796e 100644
--- a/arch/powerpc/platforms/powernv/opal-kmsg.c
+++ b/arch/powerpc/platforms/powernv/opal-kmsg.c
@@ -20,13 +20,13 @@
* message, it just ensures that OPAL completely flushes the console buffer.
*/
static void kmsg_dump_opal_console_flush(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason)
+ struct kmsg_dump_detail *detail)
{
/*
* Outside of a panic context the pollers will continue to run,
* so we don't need to do any special flushing.
*/
- if (reason != KMSG_DUMP_PANIC)
+ if (detail->reason != KMSG_DUMP_PANIC)
return;
opal_flush_console(0);
diff --git a/arch/riscv/kernel/Makefile.syscalls b/arch/riscv/kernel/Makefile.syscalls
index 52087a023b3d..9668fd1faf60 100644
--- a/arch/riscv/kernel/Makefile.syscalls
+++ b/arch/riscv/kernel/Makefile.syscalls
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
syscall_abis_32 += riscv memfd_secret
-syscall_abis_64 += riscv newstat rlimit memfd_secret
+syscall_abis_64 += riscv rlimit memfd_secret
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 8f20607adb40..b427188b28fc 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -432,28 +432,26 @@ static void __init riscv_resolve_isa(unsigned long *source_isa,
bitmap_copy(prev_resolved_isa, resolved_isa, RISCV_ISA_EXT_MAX);
for_each_set_bit(bit, source_isa, RISCV_ISA_EXT_MAX) {
ext = riscv_get_isa_ext_data(bit);
- if (!ext)
- continue;
- if (ext->validate) {
+ if (ext && ext->validate) {
ret = ext->validate(ext, resolved_isa);
if (ret == -EPROBE_DEFER) {
loop = true;
continue;
} else if (ret) {
/* Disable the extension entirely */
- clear_bit(ext->id, source_isa);
+ clear_bit(bit, source_isa);
continue;
}
}
- set_bit(ext->id, resolved_isa);
+ set_bit(bit, resolved_isa);
/* No need to keep it in source isa now that it is enabled */
- clear_bit(ext->id, source_isa);
+ clear_bit(bit, source_isa);
/* Single letter extensions get set in hwcap */
- if (ext->id < RISCV_ISA_EXT_BASE)
- *this_hwcap |= isa2hwcap[ext->id];
+ if (bit < RISCV_ISA_EXT_BASE)
+ *this_hwcap |= isa2hwcap[bit];
}
} while (loop && memcmp(prev_resolved_isa, resolved_isa, sizeof(prev_resolved_isa)));
}
diff --git a/arch/riscv/kernel/sbi-ipi.c b/arch/riscv/kernel/sbi-ipi.c
index 1026e22955cc..0cc5559c08d8 100644
--- a/arch/riscv/kernel/sbi-ipi.c
+++ b/arch/riscv/kernel/sbi-ipi.c
@@ -71,7 +71,7 @@ void __init sbi_ipi_init(void)
* the masking/unmasking of virtual IPIs is done
* via generic IPI-Mux
*/
- cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING,
"irqchip/sbi-ipi:starting",
sbi_ipi_starting_cpu, NULL);
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 5224f3733802..a9f2b4af8f3f 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -61,26 +61,27 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr)
static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
{
+ if (!user_mode(regs)) {
+ no_context(regs, addr);
+ return;
+ }
+
if (fault & VM_FAULT_OOM) {
/*
* We ran out of memory, call the OOM killer, and return to userspace
* (which will retry the fault, or kill us if we got oom-killed).
*/
- if (!user_mode(regs)) {
- no_context(regs, addr);
- return;
- }
pagefault_out_of_memory();
return;
} else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) {
/* Kernel mode? Handle exceptions or die */
- if (!user_mode(regs)) {
- no_context(regs, addr);
- return;
- }
do_trap(regs, SIGBUS, BUS_ADRERR, addr);
return;
+ } else if (fault & VM_FAULT_SIGSEGV) {
+ do_trap(regs, SIGSEGV, SEGV_MAPERR, addr);
+ return;
}
+
BUG();
}
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index bfa2dea95354..8b698d9609e7 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -234,8 +234,6 @@ static void __init setup_bootmem(void)
*/
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
- phys_ram_end = memblock_end_of_DRAM();
-
/*
* Make sure we align the start of the memory on a PMD boundary so that
* at worst, we map the linear mapping with PMD mappings.
@@ -251,6 +249,16 @@ static void __init setup_bootmem(void)
kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;
/*
+ * The size of the linear page mapping may restrict the amount of
+ * usable RAM.
+ */
+ if (IS_ENABLED(CONFIG_64BIT)) {
+ max_mapped_addr = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE;
+ memblock_cap_memory_range(phys_ram_base,
+ max_mapped_addr - phys_ram_base);
+ }
+
+ /*
* Reserve physical address space that would be mapped to virtual
* addresses greater than (void *)(-PAGE_SIZE) because:
* - This memory would overlap with ERR_PTR
@@ -266,6 +274,7 @@ static void __init setup_bootmem(void)
memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr);
}
+ phys_ram_end = memblock_end_of_DRAM();
min_low_pfn = PFN_UP(phys_ram_base);
max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
@@ -1284,8 +1293,6 @@ static void __init create_linear_mapping_page_table(void)
if (start <= __pa(PAGE_OFFSET) &&
__pa(PAGE_OFFSET) < end)
start = __pa(PAGE_OFFSET);
- if (end >= __pa(PAGE_OFFSET) + memory_limit)
- end = __pa(PAGE_OFFSET) + memory_limit;
create_linear_mapping_range(start, end, 0, NULL);
}
diff --git a/arch/riscv/purgatory/entry.S b/arch/riscv/purgatory/entry.S
index 5bcf3af903da..0e6ca6d5ae4b 100644
--- a/arch/riscv/purgatory/entry.S
+++ b/arch/riscv/purgatory/entry.S
@@ -7,6 +7,7 @@
* Author: Li Zhengyu ([email protected])
*
*/
+#include <asm/asm.h>
#include <linux/linkage.h>
.text
@@ -34,6 +35,7 @@ SYM_CODE_END(purgatory_start)
.data
+.align LGREG
SYM_DATA(riscv_kernel_entry, .quad 0)
.end
diff --git a/arch/s390/kernel/alternative.h b/arch/s390/kernel/alternative.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/arch/s390/kernel/alternative.h
+++ /dev/null
diff --git a/arch/s390/kernel/fpu.c b/arch/s390/kernel/fpu.c
index fa90bbdc5ef9..6f2e87920288 100644
--- a/arch/s390/kernel/fpu.c
+++ b/arch/s390/kernel/fpu.c
@@ -113,7 +113,7 @@ void load_fpu_state(struct fpu *state, int flags)
int mask;
if (flags & KERNEL_FPC)
- fpu_lfpc(&state->fpc);
+ fpu_lfpc_safe(&state->fpc);
if (!cpu_has_vx()) {
if (flags & KERNEL_VXR_V0V7)
load_fp_regs_vx(state->vxrs);
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 975c654cf5a5..e67cd409b858 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -59,14 +59,6 @@ SECTIONS
} :text = 0x0700
RO_DATA(PAGE_SIZE)
- .data.rel.ro : {
- *(.data.rel.ro .data.rel.ro.*)
- }
- .got : {
- __got_start = .;
- *(.got)
- __got_end = .;
- }
. = ALIGN(PAGE_SIZE);
_sdata = .; /* Start of data section */
@@ -80,6 +72,15 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__end_ro_after_init = .;
+ .data.rel.ro : {
+ *(.data.rel.ro .data.rel.ro.*)
+ }
+ .got : {
+ __got_start = .;
+ *(.got)
+ __got_end = .;
+ }
+
RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE)
.data.rel : {
*(.data.rel*)
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 98dab3e049de..0a67fcee4414 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -3,6 +3,7 @@
#include <linux/ptdump.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
+#include <linux/sort.h>
#include <linux/mm.h>
#include <linux/kfence.h>
#include <linux/kasan.h>
@@ -15,13 +16,15 @@
static unsigned long max_addr;
struct addr_marker {
+ int is_start;
unsigned long start_address;
const char *name;
};
enum address_markers_idx {
- IDENTITY_BEFORE_NR = 0,
- IDENTITY_BEFORE_END_NR,
+ KVA_NR = 0,
+ LOWCORE_START_NR,
+ LOWCORE_END_NR,
AMODE31_START_NR,
AMODE31_END_NR,
KERNEL_START_NR,
@@ -30,8 +33,8 @@ enum address_markers_idx {
KFENCE_START_NR,
KFENCE_END_NR,
#endif
- IDENTITY_AFTER_NR,
- IDENTITY_AFTER_END_NR,
+ IDENTITY_START_NR,
+ IDENTITY_END_NR,
VMEMMAP_NR,
VMEMMAP_END_NR,
VMALLOC_NR,
@@ -59,43 +62,44 @@ enum address_markers_idx {
};
static struct addr_marker address_markers[] = {
- [IDENTITY_BEFORE_NR] = {0, "Identity Mapping Start"},
- [IDENTITY_BEFORE_END_NR] = {(unsigned long)_stext, "Identity Mapping End"},
- [AMODE31_START_NR] = {0, "Amode31 Area Start"},
- [AMODE31_END_NR] = {0, "Amode31 Area End"},
- [KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"},
- [KERNEL_END_NR] = {(unsigned long)_end, "Kernel Image End"},
+ [KVA_NR] = {0, 0, "Kernel Virtual Address Space"},
+ [LOWCORE_START_NR] = {1, 0, "Lowcore Start"},
+ [LOWCORE_END_NR] = {0, 0, "Lowcore End"},
+ [IDENTITY_START_NR] = {1, 0, "Identity Mapping Start"},
+ [IDENTITY_END_NR] = {0, 0, "Identity Mapping End"},
+ [AMODE31_START_NR] = {1, 0, "Amode31 Area Start"},
+ [AMODE31_END_NR] = {0, 0, "Amode31 Area End"},
+ [KERNEL_START_NR] = {1, (unsigned long)_stext, "Kernel Image Start"},
+ [KERNEL_END_NR] = {0, (unsigned long)_end, "Kernel Image End"},
#ifdef CONFIG_KFENCE
- [KFENCE_START_NR] = {0, "KFence Pool Start"},
- [KFENCE_END_NR] = {0, "KFence Pool End"},
+ [KFENCE_START_NR] = {1, 0, "KFence Pool Start"},
+ [KFENCE_END_NR] = {0, 0, "KFence Pool End"},
#endif
- [IDENTITY_AFTER_NR] = {(unsigned long)_end, "Identity Mapping Start"},
- [IDENTITY_AFTER_END_NR] = {0, "Identity Mapping End"},
- [VMEMMAP_NR] = {0, "vmemmap Area Start"},
- [VMEMMAP_END_NR] = {0, "vmemmap Area End"},
- [VMALLOC_NR] = {0, "vmalloc Area Start"},
- [VMALLOC_END_NR] = {0, "vmalloc Area End"},
+ [VMEMMAP_NR] = {1, 0, "vmemmap Area Start"},
+ [VMEMMAP_END_NR] = {0, 0, "vmemmap Area End"},
+ [VMALLOC_NR] = {1, 0, "vmalloc Area Start"},
+ [VMALLOC_END_NR] = {0, 0, "vmalloc Area End"},
#ifdef CONFIG_KMSAN
- [KMSAN_VMALLOC_SHADOW_START_NR] = {0, "Kmsan vmalloc Shadow Start"},
- [KMSAN_VMALLOC_SHADOW_END_NR] = {0, "Kmsan vmalloc Shadow End"},
- [KMSAN_VMALLOC_ORIGIN_START_NR] = {0, "Kmsan vmalloc Origins Start"},
- [KMSAN_VMALLOC_ORIGIN_END_NR] = {0, "Kmsan vmalloc Origins End"},
- [KMSAN_MODULES_SHADOW_START_NR] = {0, "Kmsan Modules Shadow Start"},
- [KMSAN_MODULES_SHADOW_END_NR] = {0, "Kmsan Modules Shadow End"},
- [KMSAN_MODULES_ORIGIN_START_NR] = {0, "Kmsan Modules Origins Start"},
- [KMSAN_MODULES_ORIGIN_END_NR] = {0, "Kmsan Modules Origins End"},
+ [KMSAN_VMALLOC_SHADOW_START_NR] = {1, 0, "Kmsan vmalloc Shadow Start"},
+ [KMSAN_VMALLOC_SHADOW_END_NR] = {0, 0, "Kmsan vmalloc Shadow End"},
+ [KMSAN_VMALLOC_ORIGIN_START_NR] = {1, 0, "Kmsan vmalloc Origins Start"},
+ [KMSAN_VMALLOC_ORIGIN_END_NR] = {0, 0, "Kmsan vmalloc Origins End"},
+ [KMSAN_MODULES_SHADOW_START_NR] = {1, 0, "Kmsan Modules Shadow Start"},
+ [KMSAN_MODULES_SHADOW_END_NR] = {0, 0, "Kmsan Modules Shadow End"},
+ [KMSAN_MODULES_ORIGIN_START_NR] = {1, 0, "Kmsan Modules Origins Start"},
+ [KMSAN_MODULES_ORIGIN_END_NR] = {0, 0, "Kmsan Modules Origins End"},
#endif
- [MODULES_NR] = {0, "Modules Area Start"},
- [MODULES_END_NR] = {0, "Modules Area End"},
- [ABS_LOWCORE_NR] = {0, "Lowcore Area Start"},
- [ABS_LOWCORE_END_NR] = {0, "Lowcore Area End"},
- [MEMCPY_REAL_NR] = {0, "Real Memory Copy Area Start"},
- [MEMCPY_REAL_END_NR] = {0, "Real Memory Copy Area End"},
+ [MODULES_NR] = {1, 0, "Modules Area Start"},
+ [MODULES_END_NR] = {0, 0, "Modules Area End"},
+ [ABS_LOWCORE_NR] = {1, 0, "Lowcore Area Start"},
+ [ABS_LOWCORE_END_NR] = {0, 0, "Lowcore Area End"},
+ [MEMCPY_REAL_NR] = {1, 0, "Real Memory Copy Area Start"},
+ [MEMCPY_REAL_END_NR] = {0, 0, "Real Memory Copy Area End"},
#ifdef CONFIG_KASAN
- [KASAN_SHADOW_START_NR] = {KASAN_SHADOW_START, "Kasan Shadow Start"},
- [KASAN_SHADOW_END_NR] = {KASAN_SHADOW_END, "Kasan Shadow End"},
+ [KASAN_SHADOW_START_NR] = {1, KASAN_SHADOW_START, "Kasan Shadow Start"},
+ [KASAN_SHADOW_END_NR] = {0, KASAN_SHADOW_END, "Kasan Shadow End"},
#endif
- { -1, NULL }
+ {1, -1UL, NULL}
};
struct pg_state {
@@ -163,6 +167,19 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}
+static void note_page_update_state(struct pg_state *st, unsigned long addr, unsigned int prot, int level)
+{
+ struct seq_file *m = st->seq;
+
+ while (addr >= st->marker[1].start_address) {
+ st->marker++;
+ pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name);
+ }
+ st->start_address = addr;
+ st->current_prot = prot;
+ st->level = level;
+}
+
static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, u64 val)
{
int width = sizeof(unsigned long) * 2;
@@ -186,9 +203,7 @@ static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
addr = max_addr;
if (st->level == -1) {
pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name);
- st->start_address = addr;
- st->current_prot = prot;
- st->level = level;
+ note_page_update_state(st, addr, prot, level);
} else if (prot != st->current_prot || level != st->level ||
addr >= st->marker[1].start_address) {
note_prot_wx(st, addr);
@@ -202,13 +217,7 @@ static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
}
pt_dump_seq_printf(m, "%9lu%c ", delta, *unit);
print_prot(m, st->current_prot, st->level);
- while (addr >= st->marker[1].start_address) {
- st->marker++;
- pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name);
- }
- st->start_address = addr;
- st->current_prot = prot;
- st->level = level;
+ note_page_update_state(st, addr, prot, level);
}
}
@@ -280,22 +289,25 @@ static int ptdump_show(struct seq_file *m, void *v)
DEFINE_SHOW_ATTRIBUTE(ptdump);
#endif /* CONFIG_PTDUMP_DEBUGFS */
-/*
- * Heapsort from lib/sort.c is not a stable sorting algorithm, do a simple
- * insertion sort to preserve the original order of markers with the same
- * start address.
- */
-static void sort_address_markers(void)
+static int ptdump_cmp(const void *a, const void *b)
{
- struct addr_marker tmp;
- int i, j;
+ const struct addr_marker *ama = a;
+ const struct addr_marker *amb = b;
- for (i = 1; i < ARRAY_SIZE(address_markers) - 1; i++) {
- tmp = address_markers[i];
- for (j = i - 1; j >= 0 && address_markers[j].start_address > tmp.start_address; j--)
- address_markers[j + 1] = address_markers[j];
- address_markers[j + 1] = tmp;
- }
+ if (ama->start_address > amb->start_address)
+ return 1;
+ if (ama->start_address < amb->start_address)
+ return -1;
+ /*
+ * If the start addresses of two markers are identical consider the
+ * marker which defines the start of an area higher than the one which
+ * defines the end of an area. This keeps pairs of markers sorted.
+ */
+ if (ama->is_start)
+ return 1;
+ if (amb->is_start)
+ return -1;
+ return 0;
}
static int pt_dump_init(void)
@@ -303,6 +315,8 @@ static int pt_dump_init(void)
#ifdef CONFIG_KFENCE
unsigned long kfence_start = (unsigned long)__kfence_pool;
#endif
+ unsigned long lowcore = (unsigned long)get_lowcore();
+
/*
* Figure out the maximum virtual address being accessible with the
* kernel ASCE. We need this to keep the page table walker functions
@@ -310,7 +324,10 @@ static int pt_dump_init(void)
*/
max_addr = (get_lowcore()->kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2;
max_addr = 1UL << (max_addr * 11 + 31);
- address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size;
+ address_markers[LOWCORE_START_NR].start_address = lowcore;
+ address_markers[LOWCORE_END_NR].start_address = lowcore + sizeof(struct lowcore);
+ address_markers[IDENTITY_START_NR].start_address = __identity_base;
+ address_markers[IDENTITY_END_NR].start_address = __identity_base + ident_map_size;
address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31;
address_markers[AMODE31_END_NR].start_address = (unsigned long)__eamode31;
address_markers[MODULES_NR].start_address = MODULES_VADDR;
@@ -337,7 +354,8 @@ static int pt_dump_init(void)
address_markers[KMSAN_MODULES_ORIGIN_START_NR].start_address = KMSAN_MODULES_ORIGIN_START;
address_markers[KMSAN_MODULES_ORIGIN_END_NR].start_address = KMSAN_MODULES_ORIGIN_END;
#endif
- sort_address_markers();
+ sort(address_markers, ARRAY_SIZE(address_markers) - 1,
+ sizeof(address_markers[0]), ptdump_cmp, NULL);
#ifdef CONFIG_PTDUMP_DEBUGFS
debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
#endif /* CONFIG_PTDUMP_DEBUGFS */
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index ddcd39ef4346..e3d258f9e726 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -108,6 +108,8 @@ void mark_rodata_ro(void)
{
unsigned long size = __end_ro_after_init - __start_ro_after_init;
+ if (MACHINE_HAS_NX)
+ system_ctl_set_bit(0, CR0_INSTRUCTION_EXEC_PROTECTION_BIT);
__set_memory_ro(__start_ro_after_init, __end_ro_after_init);
pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
}
@@ -170,13 +172,6 @@ void __init mem_init(void)
setup_zero_pages(); /* Setup zeroed pages. */
}
-void free_initmem(void)
-{
- set_memory_rwnx((unsigned long)_sinittext,
- (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT);
- free_initmem_default(POISON_FREE_INITMEM);
-}
-
unsigned long memory_block_size_bytes(void)
{
/*
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 41c714e21292..665b8228afeb 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -661,7 +661,6 @@ void __init vmem_map_init(void)
{
__set_memory_rox(_stext, _etext);
__set_memory_ro(_etext, __end_rodata);
- __set_memory_rox(_sinittext, _einittext);
__set_memory_rox(__stext_amode31, __etext_amode31);
/*
* If the BEAR-enhancement facility is not installed the first
@@ -670,16 +669,8 @@ void __init vmem_map_init(void)
*/
if (!static_key_enabled(&cpu_has_bear))
set_memory_x(0, 1);
- if (debug_pagealloc_enabled()) {
- /*
- * Use RELOC_HIDE() as long as __va(0) translates to NULL,
- * since performing pointer arithmetic on a NULL pointer
- * has undefined behavior and generates compiler warnings.
- */
- __set_memory_4k(__va(0), RELOC_HIDE(__va(0), ident_map_size));
- }
- if (MACHINE_HAS_NX)
- system_ctl_set_bit(0, CR0_INSTRUCTION_EXEC_PROTECTION_BIT);
+ if (debug_pagealloc_enabled())
+ __set_memory_4k(__va(0), __va(0) + ident_map_size);
pr_info("Write protected kernel read-only data: %luk\n",
(unsigned long)(__end_rodata - _stext) >> 10);
}
diff --git a/arch/um/drivers/mconsole_user.c b/arch/um/drivers/mconsole_user.c
index e24298a734be..a04cd13c6315 100644
--- a/arch/um/drivers/mconsole_user.c
+++ b/arch/um/drivers/mconsole_user.c
@@ -71,7 +71,9 @@ static struct mconsole_command *mconsole_parse(struct mc_request *req)
return NULL;
}
+#ifndef MIN
#define MIN(a,b) ((a)<(b) ? (a):(b))
+#endif
#define STRINGX(x) #x
#define STRING(x) STRINGX(x)
diff --git a/arch/um/kernel/kmsg_dump.c b/arch/um/kernel/kmsg_dump.c
index 4382cf02a6d1..419021175272 100644
--- a/arch/um/kernel/kmsg_dump.c
+++ b/arch/um/kernel/kmsg_dump.c
@@ -8,7 +8,7 @@
#include <os.h>
static void kmsg_dumper_stdout(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason)
+ struct kmsg_dump_detail *detail)
{
static struct kmsg_dump_iter iter;
static DEFINE_SPINLOCK(lock);
diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
index 082d61d85dfc..de1df0cb45da 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -163,7 +163,7 @@ struct sev_config {
*/
use_cas : 1,
- __reserved : 62;
+ __reserved : 61;
};
static struct sev_config sev_cfg __read_mostly;
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 83073fa3c989..7093ee21c0d1 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -344,6 +344,7 @@
332 common statx sys_statx
333 common io_pgetevents sys_io_pgetevents
334 common rseq sys_rseq
+335 common uretprobe sys_uretprobe
# don't use numbers 387 through 423, add new calls after the last
# 'common' entry
424 common pidfd_send_signal sys_pidfd_send_signal
@@ -385,7 +386,6 @@
460 common lsm_set_self_attr sys_lsm_set_self_attr
461 common lsm_list_modules sys_lsm_list_modules
462 common mseal sys_mseal
-467 common uretprobe sys_uretprobe
#
# Due to a historical design error, certain syscalls are numbered differently
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 12f2a0c14d33..be01823b1bb4 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1520,20 +1520,23 @@ static void x86_pmu_start(struct perf_event *event, int flags)
void perf_event_print_debug(void)
{
u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
+ unsigned long *cntr_mask, *fixed_cntr_mask;
+ struct event_constraint *pebs_constraints;
+ struct cpu_hw_events *cpuc;
u64 pebs, debugctl;
- int cpu = smp_processor_id();
- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
- unsigned long *cntr_mask = hybrid(cpuc->pmu, cntr_mask);
- unsigned long *fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
- struct event_constraint *pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);
- unsigned long flags;
- int idx;
+ int cpu, idx;
+
+ guard(irqsave)();
+
+ cpu = smp_processor_id();
+ cpuc = &per_cpu(cpu_hw_events, cpu);
+ cntr_mask = hybrid(cpuc->pmu, cntr_mask);
+ fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
+ pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);
if (!*(u64 *)cntr_mask)
return;
- local_irq_save(flags);
-
if (x86_pmu.version >= 2) {
rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
@@ -1577,7 +1580,6 @@ void perf_event_print_debug(void)
pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
cpu, idx, pmc_count);
}
- local_irq_restore(flags);
}
void x86_pmu_stop(struct perf_event *event, int flags)
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index be58cfb012dd..9f116dfc4728 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -64,7 +64,7 @@
* perf code: 0x00
* Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
* KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL,
- * RPL,SPR,MTL,ARL,LNL
+ * RPL,SPR,MTL,ARL,LNL,SRF
* Scope: Package (physical package)
* MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
* perf code: 0x01
@@ -693,7 +693,8 @@ static const struct cstate_model srf_cstates __initconst = {
.core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
BIT(PERF_CSTATE_CORE_C6_RES),
- .pkg_events = BIT(PERF_CSTATE_PKG_C6_RES),
+ .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
+ BIT(PERF_CSTATE_PKG_C6_RES),
.module_events = BIT(PERF_CSTATE_MODULE_C6_RES),
};
diff --git a/arch/x86/include/asm/cmdline.h b/arch/x86/include/asm/cmdline.h
index 6faaf27e8899..6cbd9ae58b21 100644
--- a/arch/x86/include/asm/cmdline.h
+++ b/arch/x86/include/asm/cmdline.h
@@ -2,6 +2,10 @@
#ifndef _ASM_X86_CMDLINE_H
#define _ASM_X86_CMDLINE_H
+#include <asm/setup.h>
+
+extern char builtin_cmdline[COMMAND_LINE_SIZE];
+
int cmdline_find_option_bool(const char *cmdline_ptr, const char *option);
int cmdline_find_option(const char *cmdline_ptr, const char *option,
char *buffer, int bufsize);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 950a03e0181e..94e7b5a4fafe 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1305,6 +1305,7 @@ struct kvm_arch {
u8 vm_type;
bool has_private_mem;
bool has_protected_state;
+ bool pre_fault_allowed;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
struct list_head active_mmu_pages;
struct list_head zapped_obsolete_pages;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index be5889bded49..1e0fe5f8ab84 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -462,7 +462,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
switch (c->x86_model) {
case 0x00 ... 0x2f:
case 0x40 ... 0x4f:
- case 0x70 ... 0x7f:
+ case 0x60 ... 0x7f:
setup_force_cpu_cap(X86_FEATURE_ZEN5);
break;
default:
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index b3fa61d45352..0b69bfbf345d 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -306,7 +306,7 @@ static void freq_invariance_enable(void)
WARN_ON_ONCE(1);
return;
}
- static_branch_enable(&arch_scale_freq_key);
+ static_branch_enable_cpuslocked(&arch_scale_freq_key);
register_freq_invariance_syscore_ops();
pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio);
}
@@ -323,8 +323,10 @@ static void __init bp_init_freq_invariance(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return;
- if (intel_set_max_freq_ratio())
+ if (intel_set_max_freq_ratio()) {
+ guard(cpus_read_lock)();
freq_invariance_enable();
+ }
}
static void disable_freq_invariance_workfn(struct work_struct *work)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 5d34cad9b7b1..6129dc2ba784 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -164,7 +164,7 @@ unsigned long saved_video_mode;
static char __initdata command_line[COMMAND_LINE_SIZE];
#ifdef CONFIG_CMDLINE_BOOL
-static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
+char builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
bool builtin_cmdline_added __ro_after_init;
#endif
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 4287a8071a3a..472a1537b7a9 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -141,8 +141,8 @@ config KVM_AMD_SEV
depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m)
select ARCH_HAS_CC_PLATFORM
select KVM_GENERIC_PRIVATE_MEM
- select HAVE_KVM_GMEM_PREPARE
- select HAVE_KVM_GMEM_INVALIDATE
+ select HAVE_KVM_ARCH_GMEM_PREPARE
+ select HAVE_KVM_ARCH_GMEM_INVALIDATE
help
Provides support for launching Encrypted VMs (SEV) and Encrypted VMs
with Encrypted State (SEV-ES) on AMD processors.
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index a7172ba59ad2..4915acdbfcd8 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1743,7 +1743,7 @@ static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
s64 min_period = min_timer_period_us * 1000LL;
if (apic->lapic_timer.period < min_period) {
- pr_info_ratelimited(
+ pr_info_once(
"vcpu %i: requested %lld ns "
"lapic timer period limited to %lld ns\n",
apic->vcpu->vcpu_id,
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 901be9e420a4..928cf84778b0 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4335,7 +4335,7 @@ static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
if (req_max_level)
max_level = min(max_level, req_max_level);
- return req_max_level;
+ return max_level;
}
static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
@@ -4743,6 +4743,9 @@ long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
u64 end;
int r;
+ if (!vcpu->kvm->arch.pre_fault_allowed)
+ return -EOPNOTSUPP;
+
/*
* reload is efficient when called repeatedly, so we can do it on
* every iteration.
@@ -7510,7 +7513,7 @@ static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
const unsigned long end = start + KVM_PAGES_PER_HPAGE(level);
if (level == PG_LEVEL_2M)
- return kvm_range_has_memory_attributes(kvm, start, end, attrs);
+ return kvm_range_has_memory_attributes(kvm, start, end, ~0, attrs);
for (gfn = start; gfn < end; gfn += KVM_PAGES_PER_HPAGE(level - 1)) {
if (hugepage_test_mixed(slot, gfn, level - 1) ||
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index a16c873b3232..532df12b43c5 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2279,18 +2279,11 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pf
bool assigned;
int level;
- if (!kvm_mem_is_private(kvm, gfn)) {
- pr_debug("%s: Failed to ensure GFN 0x%llx has private memory attribute set\n",
- __func__, gfn);
- ret = -EINVAL;
- goto err;
- }
-
ret = snp_lookup_rmpentry((u64)pfn + i, &assigned, &level);
if (ret || assigned) {
pr_debug("%s: Failed to ensure GFN 0x%llx RMP entry is initial shared state, ret: %d assigned: %d\n",
__func__, gfn, ret, assigned);
- ret = -EINVAL;
+ ret = ret ? -EINVAL : -EEXIST;
goto err;
}
@@ -2549,6 +2542,14 @@ static int snp_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
data->gctx_paddr = __psp_pa(sev->snp_context);
ret = sev_issue_cmd(kvm, SEV_CMD_SNP_LAUNCH_FINISH, data, &argp->error);
+ /*
+ * Now that there will be no more SNP_LAUNCH_UPDATE ioctls, private pages
+ * can be given to the guest simply by marking the RMP entry as private.
+ * This can happen on first access and also with KVM_PRE_FAULT_MEMORY.
+ */
+ if (!ret)
+ kvm->arch.pre_fault_allowed = true;
+
kfree(id_auth);
e_free_id_block:
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index c115d26844f7..d6f252555ab3 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4949,6 +4949,7 @@ static int svm_vm_init(struct kvm *kvm)
to_kvm_sev_info(kvm)->need_init = true;
kvm->arch.has_private_mem = (type == KVM_X86_SNP_VM);
+ kvm->arch.pre_fault_allowed = !kvm->arch.has_private_mem;
}
if (!pause_filter_count || !pause_filter_thresh)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index af6c8cf6a37a..ef3d3511e4af 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -12646,6 +12646,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.vm_type = type;
kvm->arch.has_private_mem =
(type == KVM_X86_SW_PROTECTED_VM);
+ /* Decided by the vendor code for other VM types. */
+ kvm->arch.pre_fault_allowed =
+ type == KVM_X86_DEFAULT_VM || type == KVM_X86_SW_PROTECTED_VM;
ret = kvm_page_track_init(kvm);
if (ret)
@@ -13641,19 +13644,14 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
-#ifdef CONFIG_HAVE_KVM_GMEM_PREPARE
-bool kvm_arch_gmem_prepare_needed(struct kvm *kvm)
-{
- return kvm->arch.vm_type == KVM_X86_SNP_VM;
-}
-
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order)
{
return kvm_x86_call(gmem_prepare)(kvm, pfn, gfn, max_order);
}
#endif
-#ifdef CONFIG_HAVE_KVM_GMEM_INVALIDATE
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
{
kvm_x86_call(gmem_invalidate)(start, end);
diff --git a/arch/x86/lib/cmdline.c b/arch/x86/lib/cmdline.c
index 384da1fdd5c6..c65cd5550454 100644
--- a/arch/x86/lib/cmdline.c
+++ b/arch/x86/lib/cmdline.c
@@ -207,18 +207,29 @@ __cmdline_find_option(const char *cmdline, int max_cmdline_size,
int cmdline_find_option_bool(const char *cmdline, const char *option)
{
- if (IS_ENABLED(CONFIG_CMDLINE_BOOL))
- WARN_ON_ONCE(!builtin_cmdline_added);
+ int ret;
- return __cmdline_find_option_bool(cmdline, COMMAND_LINE_SIZE, option);
+ ret = __cmdline_find_option_bool(cmdline, COMMAND_LINE_SIZE, option);
+ if (ret > 0)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && !builtin_cmdline_added)
+ return __cmdline_find_option_bool(builtin_cmdline, COMMAND_LINE_SIZE, option);
+
+ return ret;
}
int cmdline_find_option(const char *cmdline, const char *option, char *buffer,
int bufsize)
{
- if (IS_ENABLED(CONFIG_CMDLINE_BOOL))
- WARN_ON_ONCE(!builtin_cmdline_added);
+ int ret;
+
+ ret = __cmdline_find_option(cmdline, COMMAND_LINE_SIZE, option, buffer, bufsize);
+ if (ret > 0)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && !builtin_cmdline_added)
+ return __cmdline_find_option(builtin_cmdline, COMMAND_LINE_SIZE, option, buffer, bufsize);
- return __cmdline_find_option(cmdline, COMMAND_LINE_SIZE, option,
- buffer, bufsize);
+ return ret;
}
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index a314622aa093..d066aecf8aeb 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -88,12 +88,14 @@ SYM_FUNC_END(__get_user_4)
EXPORT_SYMBOL(__get_user_4)
SYM_FUNC_START(__get_user_8)
+#ifndef CONFIG_X86_64
+ xor %ecx,%ecx
+#endif
check_range size=8
ASM_STAC
#ifdef CONFIG_X86_64
UACCESS movq (%_ASM_AX),%rdx
#else
- xor %ecx,%ecx
UACCESS movl (%_ASM_AX),%edx
UACCESS movl 4(%_ASM_AX),%ecx
#endif
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 2e69abf4f852..bfdf5f45b137 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -374,14 +374,14 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
*/
*target_pmd = *pmd;
- addr += PMD_SIZE;
+ addr = round_up(addr + 1, PMD_SIZE);
} else if (level == PTI_CLONE_PTE) {
/* Walk the page-table down to the pte level */
pte = pte_offset_kernel(pmd, addr);
if (pte_none(*pte)) {
- addr += PAGE_SIZE;
+ addr = round_up(addr + 1, PAGE_SIZE);
continue;
}
@@ -401,7 +401,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
/* Clone the PTE */
*target_pte = *pte;
- addr += PAGE_SIZE;
+ addr = round_up(addr + 1, PAGE_SIZE);
} else {
BUG();
@@ -496,7 +496,7 @@ static void pti_clone_entry_text(void)
{
pti_clone_pgtable((unsigned long) __entry_text_start,
(unsigned long) __entry_text_end,
- PTI_CLONE_PMD);
+ PTI_LEVEL_KERNEL_IMAGE);
}
/*
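
The switch to round_up() matters when the clone starts at an address that is not PMD- or PAGE-aligned: stepping by a fixed size never regains alignment, while rounding up lands on the next boundary. A standalone illustration, with round_up() open-coded for power-of-two sizes::

  #include <stdio.h>

  #define PMD_SIZE (2UL << 20)    /* 2 MiB, as on x86-64 */
  #define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)    /* power-of-two y */

  int main(void)
  {
          unsigned long addr = 0x1234000UL;       /* not 2 MiB aligned */

          printf("addr += PMD_SIZE        -> %#lx (still unaligned)\n",
                 addr + PMD_SIZE);
          printf("round_up(addr + 1, PMD) -> %#lx (next PMD boundary)\n",
                 round_up(addr + 1, PMD_SIZE));
          return 0;
  }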
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index de3d66116375..ede6165e09d9 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -60,6 +60,10 @@ static struct {
{ IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
};
+/* Production fw_names from the table above */
+MODULE_FIRMWARE("intel/vpu/vpu_37xx_v0.0.bin");
+MODULE_FIRMWARE("intel/vpu/vpu_40xx_v0.0.bin");
+
static int ivpu_fw_request(struct ivpu_device *vdev)
{
int ret = -ENOENT;
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 90a94a111e67..769fa288179d 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -413,6 +413,7 @@ config BT_ATH3K
config BT_MTKSDIO
tristate "MediaTek HCI SDIO driver"
depends on MMC
+ depends on USB || !BT_HCIBTUSB_MTK
select BT_MTK
help
MediaTek Bluetooth HCI SDIO driver.
@@ -425,6 +426,7 @@ config BT_MTKSDIO
config BT_MTKUART
tristate "MediaTek HCI UART driver"
depends on SERIAL_DEV_BUS
+ depends on USB || !BT_HCIBTUSB_MTK
select BT_MTK
help
MediaTek Bluetooth HCI UART driver.
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index e7a612504ab1..2ebc970e6573 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -3085,6 +3085,9 @@ static int btintel_setup_combined(struct hci_dev *hdev)
btintel_set_dsm_reset_method(hdev, &ver_tlv);
err = btintel_bootloader_setup_tlv(hdev, &ver_tlv);
+ if (err)
+ goto exit_error;
+
btintel_register_devcoredump_support(hdev);
btintel_print_fseq_info(hdev);
break;
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index b7c348687a77..2b7c80043aa2 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -437,6 +437,7 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(btmtk_process_coredump);
+#if IS_ENABLED(CONFIG_BT_HCIBTUSB_MTK)
static void btmtk_usb_wmt_recv(struct urb *urb)
{
struct hci_dev *hdev = urb->context;
@@ -1262,7 +1263,8 @@ int btmtk_usb_suspend(struct hci_dev *hdev)
struct btmtk_data *btmtk_data = hci_get_priv(hdev);
/* Stop urb anchor for iso data transmission */
- usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor);
+ if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags))
+ usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor);
return 0;
}
@@ -1487,6 +1489,7 @@ int btmtk_usb_shutdown(struct hci_dev *hdev)
return 0;
}
EXPORT_SYMBOL_GPL(btmtk_usb_shutdown);
+#endif
MODULE_AUTHOR("Sean Wang <[email protected]>");
MODULE_AUTHOR("Mark Chen <[email protected]>");
diff --git a/drivers/cache/Kconfig b/drivers/cache/Kconfig
index 94abd8f632a7..db51386c663a 100644
--- a/drivers/cache/Kconfig
+++ b/drivers/cache/Kconfig
@@ -18,6 +18,7 @@ config STARFIVE_STARLINK_CACHE
bool "StarFive StarLink Cache controller"
depends on RISCV
depends on ARCH_STARFIVE
+ depends on 64BIT
select RISCV_DMA_NONCOHERENT
select RISCV_NONSTANDARD_CACHE_OPS
help
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index 2298ca5e112e..3cbe87d4a464 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -7,17 +7,15 @@
*/
#include <linux/cdev.h>
-#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
#include <linux/err.h>
-#include <linux/xarray.h>
#include <linux/list.h>
-#include <linux/slab.h>
#include <linux/nospec.h>
-#include <linux/uaccess.h>
#include <linux/syscalls.h>
-#include <linux/dma-heap.h>
+#include <linux/uaccess.h>
+#include <linux/xarray.h>
#include <uapi/linux/dma-heap.h>
#define DEVNAME "dma_heap"
@@ -28,9 +26,10 @@
* struct dma_heap - represents a dmabuf heap in the system
* @name: used for debugging/device-node name
* @ops: ops struct for this heap
- * @heap_devt heap device node
- * @list list head connecting to list of heaps
- * @heap_cdev heap char device
+ * @priv: private data for this heap
+ * @heap_devt: heap device node
+ * @list: list head connecting to list of heaps
+ * @heap_cdev: heap char device
*
* Represents a heap of memory from which buffers can be made.
*/
@@ -193,11 +192,11 @@ static const struct file_operations dma_heap_fops = {
};
/**
- * dma_heap_get_drvdata() - get per-subdriver data for the heap
+ * dma_heap_get_drvdata - get per-heap driver data
* @heap: DMA-Heap to retrieve private data for
*
* Returns:
- * The per-subdriver data for the heap.
+ * The per-heap data for the heap.
*/
void *dma_heap_get_drvdata(struct dma_heap *heap)
{
@@ -205,8 +204,8 @@ void *dma_heap_get_drvdata(struct dma_heap *heap)
}
/**
- * dma_heap_get_name() - get heap name
- * @heap: DMA-Heap to retrieve private data for
+ * dma_heap_get_name - get heap name
+ * @heap: DMA-Heap to retrieve the name of
*
* Returns:
* The char* for the heap name.
@@ -216,6 +215,10 @@ const char *dma_heap_get_name(struct dma_heap *heap)
return heap->name;
}
+/**
+ * dma_heap_add - adds a heap to dmabuf heaps
+ * @exp_info: information needed to register this heap
+ */
struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
{
struct dma_heap *heap, *h, *err_ret;
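
Besides reordering the includes alphabetically, the dma-heap.c hunks bring the comments into kernel-doc form: each member or parameter gets the "@name: description" shape, and exported entry points such as dma_heap_add() gain a /** ... */ block. A short illustration of that layout, with invented names:

/**
 * struct demo_object - one-line summary of the object
 * @name:  human-readable identifier
 * @count: number of users holding a reference
 */
struct demo_object {
	const char *name;
	int count;
};

/**
 * demo_object_name - get an object's name
 * @obj: object to query
 *
 * Returns:
 * The char* for the object name.
 */
static const char *demo_object_name(struct demo_object *obj)
{
	return obj->name;
}
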
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index 11faf1db4fa4..473421ba7a18 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -45,7 +45,6 @@
#define I10NM_NUM_CHANNELS MAX(I10NM_NUM_DDR_CHANNELS, I10NM_NUM_HBM_CHANNELS)
#define I10NM_NUM_DIMMS MAX(I10NM_NUM_DDR_DIMMS, I10NM_NUM_HBM_DIMMS)
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define NUM_IMC MAX(SKX_NUM_IMC, I10NM_NUM_IMC)
#define NUM_CHANNELS MAX(SKX_NUM_CHANNELS, I10NM_NUM_CHANNELS)
#define NUM_DIMMS MAX(SKX_NUM_DIMMS, I10NM_NUM_DIMMS)
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index f23ba62ce127..ed4e8ddbe76a 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -27,7 +27,8 @@ cflags-$(CONFIG_ARM64) += -fpie $(DISABLE_STACKLEAK_PLUGIN) \
cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \
-DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \
-DEFI_HAVE_STRCMP -fno-builtin -fpic \
- $(call cc-option,-mno-single-pic-base)
+ $(call cc-option,-mno-single-pic-base) \
+ $(DISABLE_STACKLEAK_PLUGIN)
cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE -mno-relax \
$(DISABLE_STACKLEAK_PLUGIN)
cflags-$(CONFIG_LOONGARCH) += -fpie
@@ -57,6 +58,10 @@ KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_CFI), $(KBUILD_CFLAGS))
# disable LTO
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO), $(KBUILD_CFLAGS))
+# The .data section would be renamed to .data.efistub, therefore, remove
+# `-fdata-sections` flag from KBUILD_CFLAGS_KERNEL
+KBUILD_CFLAGS_KERNEL := $(filter-out -fdata-sections, $(KBUILD_CFLAGS_KERNEL))
+
lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \
file.o mem.o random.o randomalloc.o pci.o \
skip_spaces.o lib-cmdline.o lib-ctype.o \
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index fd0749c0c630..0387143bbb39 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -107,7 +107,7 @@ config DRM_KMS_HELPER
config DRM_PANIC
bool "Display a user-friendly message when a kernel panic occurs"
- depends on DRM && !(FRAMEBUFFER_CONSOLE && VT_CONSOLE)
+ depends on DRM
select FONT_SUPPORT
help
Enable a drm panic handler, which will display a user-friendly message
@@ -268,6 +268,7 @@ config DRM_EXEC
config DRM_GPUVM
tristate
depends on DRM
+ select DRM_EXEC
help
GPU-VM representation providing helpers to manage a GPUs virtual
address space
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 3a3f3ce09f00..2320df51c914 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -300,7 +300,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus
if (r)
goto out;
} else {
- drm_sched_start(&ring->sched, false);
+ drm_sched_start(&ring->sched);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 916b6b8cf7d9..9aa952f258cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1778,7 +1778,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va_mapping *mapping;
- int r;
+ int i, r;
addr /= AMDGPU_GPU_PAGE_SIZE;
@@ -1793,13 +1793,13 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
return -EINVAL;
- if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
- (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
- r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
- if (r)
- return r;
- }
+ (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
+ for (i = 0; i < (*bo)->placement.num_placement; i++)
+ (*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
+ r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
+ if (r)
+ return r;
return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index bcacf2e35eba..1cd7d355689c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -5879,7 +5879,7 @@ skip_hw_reset:
if (!amdgpu_ring_sched_ready(ring))
continue;
- drm_sched_start(&ring->sched, true);
+ drm_sched_start(&ring->sched);
}
if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
@@ -6374,7 +6374,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
if (!amdgpu_ring_sched_ready(ring))
continue;
- drm_sched_start(&ring->sched, true);
+ drm_sched_start(&ring->sched);
}
amdgpu_device_unset_mp1_state(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index e499d6ba306b..dac88d2dd70d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -103,7 +103,7 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
if (!amdgpu_mes_log_enable)
return 0;
- r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_LOG_BUFFER_SIZE, PAGE_SIZE,
+ r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&adev->mes.event_log_gpu_obj,
&adev->mes.event_log_gpu_addr,
@@ -113,7 +113,7 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
return r;
}
- memset(adev->mes.event_log_cpu_addr, 0, PAGE_SIZE);
+ memset(adev->mes.event_log_cpu_addr, 0, adev->mes.event_log_size);
return 0;
@@ -1573,7 +1573,7 @@ static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);
seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
- mem, AMDGPU_MES_LOG_BUFFER_SIZE, false);
+ mem, adev->mes.event_log_size, false);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index e11051271f71..2d659c612f03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -52,7 +52,6 @@ enum amdgpu_mes_priority_level {
#define AMDGPU_MES_PROC_CTX_SIZE 0x1000 /* one page area */
#define AMDGPU_MES_GANG_CTX_SIZE 0x1000 /* one page area */
-#define AMDGPU_MES_LOG_BUFFER_SIZE 0x4000 /* Maximu log buffer size for MES */
struct amdgpu_mes_funcs;
@@ -135,8 +134,9 @@ struct amdgpu_mes {
unsigned long *doorbell_bitmap;
/* MES event log buffer */
- struct amdgpu_bo *event_log_gpu_obj;
- uint64_t event_log_gpu_addr;
+ uint32_t event_log_size;
+ struct amdgpu_bo *event_log_gpu_obj;
+ uint64_t event_log_gpu_addr;
void *event_log_cpu_addr;
/* ip specific functions */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index a060c28f0877..52e6a0b3f0c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2421,6 +2421,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r)
return r;
+ ttm_lru_bulk_move_init(&vm->lru_bulk_move);
+
vm->is_compute_context = false;
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
@@ -2485,6 +2487,7 @@ error_free_root:
error_free_delayed:
dma_fence_put(vm->last_tlb_flush);
dma_fence_put(vm->last_unlocked);
+ ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
amdgpu_vm_fini_entities(vm);
return r;
@@ -2641,6 +2644,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
}
+ ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 8ce51b9236c1..f9343642ae7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -1163,6 +1163,8 @@ static int mes_v11_0_sw_init(void *handle)
adev->mes.kiq_hw_init = &mes_v11_0_kiq_hw_init;
adev->mes.kiq_hw_fini = &mes_v11_0_kiq_hw_fini;
+ adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE;
+
r = amdgpu_mes_init(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index c9f74231ad59..0713bc3eb263 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -551,8 +551,10 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.oversubscription_timer = 50;
mes_set_hw_res_pkt.unmapped_doorbell_handling = 1;
- mes_set_hw_res_pkt.enable_mes_event_int_logging = 0;
- mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
+ if (amdgpu_mes_log_enable) {
+ mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
+ mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
+ }
return mes_v12_0_submit_pkt_and_poll_completion(mes,
&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
@@ -1237,6 +1239,8 @@ static int mes_v12_0_sw_init(void *handle)
adev->mes.kiq_hw_init = &mes_v12_0_kiq_hw_init;
adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini;
+ adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE;
+
r = amdgpu_mes_init(adev);
if (r)
return r;
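
Across amdgpu_mes.c, amdgpu_mes.h and the two mes_v1x_0.c files, the fixed AMDGPU_MES_LOG_BUFFER_SIZE is replaced by a per-IP adev->mes.event_log_size that each generation sets in its sw_init (the v11 and v12 API headers later in this diff define 0x4000 and 0xC000 respectively), so allocation, clearing and the debugfs dump all use the same size. A sketch of the shape of that pattern, with illustrative names:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_log {
	u32   size;        /* filled in by the IP-specific init before use */
	void *cpu_addr;
};

static int demo_log_init(struct demo_log *log)
{
	log->cpu_addr = kzalloc(log->size, GFP_KERNEL);  /* size, not a global constant */
	return log->cpu_addr ? 0 : -ENOMEM;
}
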
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index be2638c763d7..9a406d74c0dd 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -35,8 +35,10 @@
#include "dc_stream_priv.h"
#define DC_LOGGER dc->ctx->logger
+#ifndef MIN
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#define MAX(x, y) ((x > y) ? x : y)
+#endif
/*******************************************************************************
* Private functions
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
index 7ecf76aea950..6e064e6ae949 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -25,7 +25,9 @@
#include "hdcp.h"
+#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#endif
#define HDCP_I2C_ADDR 0x3a /* 0x74 >> 1*/
#define KSV_READ_SIZE 0xf /* 0x6803b - 0x6802c */
#define HDCP_MAX_AUX_TRANSACTION_SIZE 16
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
index b72d5d362251..21ceafce1f9b 100644
--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
@@ -28,6 +28,9 @@
#define MES_API_VERSION 1
+/* Maximum log buffer size for MES. Needs to be updated if MES expands MES_EVT_INTR_HIST_LOG */
+#define AMDGPU_MES_LOG_BUFFER_SIZE 0x4000
+
/* Driver submits one API(cmd) as a single Frame and this command size is same
* for all API to ease the debugging and parsing of ring buffer.
*/
diff --git a/drivers/gpu/drm/amd/include/mes_v12_api_def.h b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
index ffd67c6ed9b3..4cf2c9f30b3d 100644
--- a/drivers/gpu/drm/amd/include/mes_v12_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
@@ -28,6 +28,9 @@
#define MES_API_VERSION 0x14
+/* Maximum log buffer size for MES. Needs to be updated if MES expands MES_EVT_INTR_HIST_LOG_12 */
+#define AMDGPU_MES_LOG_BUFFER_SIZE 0xC000
+
/* Driver submits one API(cmd) as a single Frame and this command size is same for all API
* to ease the debugging and parsing of ring buffer.
*/
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index a1b8a82d77cf..8b7d6ed7e2ed 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -618,7 +618,8 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int r = 0;
- if (!pp_funcs || !pp_funcs->load_firmware || adev->flags & AMD_IS_APU)
+ if (!pp_funcs || !pp_funcs->load_firmware ||
+ (is_support_sw_smu(adev) && (adev->flags & AMD_IS_APU)))
return 0;
mutex_lock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h
index 6f54c410c2f9..409aeec6baa9 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h
@@ -22,12 +22,18 @@
*/
#include <asm/div64.h>
-#define SHIFT_AMOUNT 16 /* We multiply all original integers with 2^SHIFT_AMOUNT to get the fInt representation */
+enum ppevvmath_constants {
+ /* We multiply all original integers with 2^SHIFT_AMOUNT to get the fInt representation */
+ SHIFT_AMOUNT = 16,
-#define PRECISION 5 /* Change this value to change the number of decimal places in the final output - 5 is a good default */
+ /* Change this value to change the number of decimal places in the final output - 5 is a good default */
+ PRECISION = 5,
-#define SHIFTED_2 (2 << SHIFT_AMOUNT)
-#define MAX (1 << (SHIFT_AMOUNT - 1)) - 1 /* 32767 - Might change in the future */
+ SHIFTED_2 = (2 << SHIFT_AMOUNT),
+
+ /* 32767 - Might change in the future */
+ MAX = (1 << (SHIFT_AMOUNT - 1)) - 1,
+};
/* -------------------------------------------------------------------------------
* NEW TYPE - fINT
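
Moving SHIFT_AMOUNT, PRECISION, SHIFTED_2 and MAX into an enum gives the constants a real type and, unlike the old unparenthesised "#define MAX (1 << (SHIFT_AMOUNT - 1)) - 1", makes them safe to use inside larger expressions. A self-contained userspace check of the difference; the values mirror the hunk, the program itself is only illustrative:

#include <stdio.h>

enum demo_constants {
	SHIFT_AMOUNT = 16,
	MAX = (1 << (SHIFT_AMOUNT - 1)) - 1,   /* 32767, evaluated once */
};

int main(void)
{
	/* With the old macro, 2 * MAX expanded to 2 * (1 << 15) - 1 == 65535. */
	printf("2 * MAX = %d\n", 2 * MAX);      /* prints 65534, as intended */
	return 0;
}
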
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 98ea58d792ca..e1a27903c80a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -66,6 +66,7 @@
#define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000
#define DEBUGSMC_MSG_Mode1Reset 2
+#define LINK_SPEED_MAX 3
static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
@@ -221,7 +222,6 @@ static struct cmn2asic_mapping smu_v14_0_2_workload_map[PP_SMC_POWER_PROFILE_COU
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D, WORKLOAD_PPLIB_WINDOW_3D_BIT),
};
-#if 0
static const uint8_t smu_v14_0_2_throttler_map[] = {
[THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT),
[THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT),
@@ -241,7 +241,6 @@ static const uint8_t smu_v14_0_2_throttler_map[] = {
[THROTTLER_GFX_APCC_PLUS_BIT] = (SMU_THROTTLER_APCC_BIT),
[THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT),
};
-#endif
static int
smu_v14_0_2_get_allowed_feature_mask(struct smu_context *smu,
@@ -1869,6 +1868,88 @@ static ssize_t smu_v14_0_2_get_ecc_info(struct smu_context *smu,
return ret;
}
+static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu,
+ void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct gpu_metrics_v1_3 *gpu_metrics =
+ (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
+ SmuMetricsExternal_t metrics_ext;
+ SmuMetrics_t *metrics = &metrics_ext.SmuMetrics;
+ int ret = 0;
+
+ ret = smu_cmn_get_metrics_table(smu,
+ &metrics_ext,
+ true);
+ if (ret)
+ return ret;
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
+
+ gpu_metrics->temperature_edge = metrics->AvgTemperature[TEMP_EDGE];
+ gpu_metrics->temperature_hotspot = metrics->AvgTemperature[TEMP_HOTSPOT];
+ gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM];
+ gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX];
+ gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC];
+ gpu_metrics->temperature_vrmem = max(metrics->AvgTemperature[TEMP_VR_MEM0],
+ metrics->AvgTemperature[TEMP_VR_MEM1]);
+
+ gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
+ gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
+ gpu_metrics->average_mm_activity = max(metrics->Vcn0ActivityPercentage,
+ metrics->Vcn1ActivityPercentage);
+
+ gpu_metrics->average_socket_power = metrics->AverageSocketPower;
+ gpu_metrics->energy_accumulator = metrics->EnergyAccumulator;
+
+ if (metrics->AverageGfxActivity <= SMU_14_0_2_BUSY_THRESHOLD)
+ gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs;
+ else
+ gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs;
+
+ if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
+ gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs;
+ else
+ gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs;
+
+ gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency;
+ gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency;
+ gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
+ gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;
+
+ gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency;
+ gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
+ gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
+ gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
+ gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0];
+ gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_0];
+ gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_0];
+
+ gpu_metrics->throttle_status =
+ smu_v14_0_2_get_throttler_status(metrics);
+ gpu_metrics->indep_throttle_status =
+ smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
+ smu_v14_0_2_throttler_map);
+
+ gpu_metrics->current_fan_speed = metrics->AvgFanRpm;
+
+ gpu_metrics->pcie_link_width = metrics->PcieWidth;
+ if ((metrics->PcieRate - 1) > LINK_SPEED_MAX)
+ gpu_metrics->pcie_link_speed = pcie_gen_to_speed(1);
+ else
+ gpu_metrics->pcie_link_speed = pcie_gen_to_speed(metrics->PcieRate);
+
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+ gpu_metrics->voltage_gfx = metrics->AvgVoltage[SVI_PLANE_VDD_GFX];
+ gpu_metrics->voltage_soc = metrics->AvgVoltage[SVI_PLANE_VDD_SOC];
+ gpu_metrics->voltage_mem = metrics->AvgVoltage[SVI_PLANE_VDDIO_MEM];
+
+ *table = (void *)gpu_metrics;
+
+ return sizeof(struct gpu_metrics_v1_3);
+}
+
static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.get_allowed_feature_mask = smu_v14_0_2_get_allowed_feature_mask,
.set_default_dpm_table = smu_v14_0_2_set_default_dpm_table,
@@ -1905,6 +1986,7 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.enable_thermal_alert = smu_v14_0_enable_thermal_alert,
.disable_thermal_alert = smu_v14_0_disable_thermal_alert,
.notify_memory_pool_location = smu_v14_0_notify_memory_pool_location,
+ .get_gpu_metrics = smu_v14_0_2_get_gpu_metrics,
.set_soft_freq_limited_range = smu_v14_0_set_soft_freq_limited_range,
.init_pptable_microcode = smu_v14_0_init_pptable_microcode,
.populate_umd_state_clk = smu_v14_0_2_populate_umd_state_clk,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 88eefef05fae..91ad434bcdae 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -794,7 +794,7 @@ static const char *smu_get_feature_name(struct smu_context *smu,
size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
char *buf)
{
- int8_t sort_feature[max(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
+ int8_t sort_feature[MAX(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
uint64_t feature_mask;
int i, feature_index;
uint32_t count = 0;
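
Switching the sort_feature[] bound from the lowercase max() to the uppercase MAX() keeps it an integer constant expression, so the array stays fixed-size rather than becoming a variable-length array; this is presumably also why the duplicate local MAX/MIN definitions elsewhere in this series are dropped or wrapped in #ifndef once a common version exists. A minimal userspace illustration with stand-in values:

#include <stdint.h>
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

enum { DEMO_FEATURE_COUNT = 64, DEMO_FEATURE_MAX = 96 };

int main(void)
{
	int8_t sort_feature[MAX(DEMO_FEATURE_COUNT, DEMO_FEATURE_MAX)];

	printf("array holds %zu entries\n", sizeof(sort_feature));  /* 96 */
	return 0;
}
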
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index 1e9259416980..5d07678b502c 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -9,11 +9,7 @@
bool ast_astdp_is_connected(struct ast_device *ast)
{
- if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, ASTDP_MCU_FW_EXECUTING))
- return false;
- if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_HPD))
- return false;
- if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, ASTDP_LINK_SUCCESS))
+ if (!ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, AST_IO_VGACRDF_HPD))
return false;
return true;
}
@@ -21,70 +17,55 @@ bool ast_astdp_is_connected(struct ast_device *ast)
int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata)
{
struct ast_device *ast = to_ast_device(dev);
- u8 i = 0, j = 0;
-
- /*
- * CRD1[b5]: DP MCU FW is executing
- * CRDC[b0]: DP link success
- * CRDF[b0]: DP HPD
- * CRE5[b0]: Host reading EDID process is done
- */
- if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, ASTDP_MCU_FW_EXECUTING) &&
- ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, ASTDP_LINK_SUCCESS) &&
- ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_HPD) &&
- ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xE5,
- ASTDP_HOST_EDID_READ_DONE_MASK))) {
- goto err_astdp_edid_not_ready;
- }
+ int ret = 0;
+ u8 i;
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE5, (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
- 0x00);
+ /* Start reading EDID data */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xe5, (u8)~AST_IO_VGACRE5_EDID_READ_DONE, 0x00);
for (i = 0; i < 32; i++) {
+ unsigned int j;
+
/*
* CRE4[7:0]: Read-Pointer for EDID (Unit: 4bytes); valid range: 0~64
*/
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE4,
- ASTDP_AND_CLEAR_MASK, (u8)i);
- j = 0;
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xe4, i);
/*
* CRD7[b0]: valid flag for EDID
* CRD6[b0]: mirror read pointer for EDID
*/
- while ((ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD7,
- ASTDP_EDID_VALID_FLAG_MASK) != 0x01) ||
- (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD6,
- ASTDP_EDID_READ_POINTER_MASK) != i)) {
+ for (j = 0; j < 200; ++j) {
+ u8 vgacrd7, vgacrd6;
+
/*
* Delay are getting longer with each retry.
- * 1. The Delays are often 2 loops when users request "Display Settings"
+ *
+ * 1. No delay on first try
+ * 2. The Delays are often 2 loops when users request "Display Settings"
* of right-click of mouse.
- * 2. The Delays are often longer a lot when system resume from S3/S4.
+ * 3. The Delays are often longer a lot when system resume from S3/S4.
*/
- mdelay(j+1);
-
- if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1,
- ASTDP_MCU_FW_EXECUTING) &&
- ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC,
- ASTDP_LINK_SUCCESS) &&
- ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_HPD))) {
- goto err_astdp_jump_out_loop_of_edid;
+ if (j)
+ mdelay(j + 1);
+
+ /* Wait for EDID offset to show up in mirror register */
+ vgacrd7 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd7);
+ if (vgacrd7 & AST_IO_VGACRD7_EDID_VALID_FLAG) {
+ vgacrd6 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd6);
+ if (vgacrd6 == i)
+ break;
}
-
- j++;
- if (j > 200)
- goto err_astdp_jump_out_loop_of_edid;
+ }
+ if (j == 200) {
+ ret = -EBUSY;
+ goto out;
}
- *(ediddata) = ast_get_index_reg_mask(ast, AST_IO_VGACRI,
- 0xD8, ASTDP_EDID_READ_DATA_MASK);
- *(ediddata + 1) = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD9,
- ASTDP_EDID_READ_DATA_MASK);
- *(ediddata + 2) = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDA,
- ASTDP_EDID_READ_DATA_MASK);
- *(ediddata + 3) = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDB,
- ASTDP_EDID_READ_DATA_MASK);
+ ediddata[0] = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd8);
+ ediddata[1] = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd9);
+ ediddata[2] = ast_get_index_reg(ast, AST_IO_VGACRI, 0xda);
+ ediddata[3] = ast_get_index_reg(ast, AST_IO_VGACRI, 0xdb);
if (i == 31) {
/*
@@ -96,69 +77,57 @@ int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata)
* The Bytes-126 indicates the Number of extensions to
* follow. 0 represents noextensions.
*/
- *(ediddata + 3) = *(ediddata + 3) + *(ediddata + 2);
- *(ediddata + 2) = 0;
+ ediddata[3] = ediddata[3] + ediddata[2];
+ ediddata[2] = 0;
}
ediddata += 4;
}
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE5, (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
- ASTDP_HOST_EDID_READ_DONE);
-
- return 0;
+out:
+ /* Signal end of reading */
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xe5, (u8)~AST_IO_VGACRE5_EDID_READ_DONE,
+ AST_IO_VGACRE5_EDID_READ_DONE);
-err_astdp_jump_out_loop_of_edid:
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE5,
- (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
- ASTDP_HOST_EDID_READ_DONE);
- return (~(j+256) + 1);
-
-err_astdp_edid_not_ready:
- if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, ASTDP_MCU_FW_EXECUTING)))
- return (~0xD1 + 1);
- if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, ASTDP_LINK_SUCCESS)))
- return (~0xDC + 1);
- if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_HPD)))
- return (~0xDF + 1);
- if (!(ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xE5, ASTDP_HOST_EDID_READ_DONE_MASK)))
- return (~0xE5 + 1);
-
- return 0;
+ return ret;
}
/*
* Launch Aspeed DP
*/
-void ast_dp_launch(struct drm_device *dev)
+int ast_dp_launch(struct ast_device *ast)
{
- u32 i = 0;
- u8 bDPExecute = 1;
- struct ast_device *ast = to_ast_device(dev);
+ struct drm_device *dev = &ast->base;
+ unsigned int i = 10;
- // Wait one second then timeout.
- while (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, ASTDP_MCU_FW_EXECUTING) !=
- ASTDP_MCU_FW_EXECUTING) {
- i++;
- // wait 100 ms
- msleep(100);
+ while (i) {
+ u8 vgacrd1 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd1);
- if (i >= 10) {
- // DP would not be ready.
- bDPExecute = 0;
+ if (vgacrd1 & AST_IO_VGACRD1_MCU_FW_EXECUTING)
break;
- }
+ --i;
+ msleep(100);
}
-
- if (!bDPExecute)
+ if (!i) {
drm_err(dev, "Wait DPMCU executing timeout\n");
+ return -ENODEV;
+ }
+
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xe5,
+ (u8) ~AST_IO_VGACRE5_EDID_READ_DONE,
+ AST_IO_VGACRE5_EDID_READ_DONE);
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE5,
- (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
- ASTDP_HOST_EDID_READ_DONE);
+ return 0;
}
+bool ast_dp_power_is_on(struct ast_device *ast)
+{
+ u8 vgacre3;
+
+ vgacre3 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xe3);
+ return !(vgacre3 & AST_DP_PHY_SLEEP);
+}
void ast_dp_power_on_off(struct drm_device *dev, bool on)
{
@@ -174,7 +143,22 @@ void ast_dp_power_on_off(struct drm_device *dev, bool on)
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_PHY_SLEEP, bE3);
}
+void ast_dp_link_training(struct ast_device *ast)
+{
+ struct drm_device *dev = &ast->base;
+ unsigned int i = 10;
+ while (i--) {
+ u8 vgacrdc = ast_get_index_reg(ast, AST_IO_VGACRI, 0xdc);
+
+ if (vgacrdc & AST_IO_VGACRDC_LINK_SUCCESS)
+ break;
+ if (i)
+ msleep(100);
+ }
+ if (!i)
+ drm_err(dev, "Link training failed\n");
+}
void ast_dp_set_on_off(struct drm_device *dev, bool on)
{
@@ -185,17 +169,13 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
// Video On/Off
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_VIDEO_ENABLE, on);
- // If DP plug in and link successful then check video on / off status
- if (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDC, ASTDP_LINK_SUCCESS) &&
- ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF, ASTDP_HPD)) {
- video_on_off <<= 4;
- while (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF,
+ video_on_off <<= 4;
+ while (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF,
ASTDP_MIRROR_VIDEO_ENABLE) != video_on_off) {
- // wait 1 ms
- mdelay(1);
- if (++i > 200)
- break;
- }
+ // wait 1 ms
+ mdelay(1);
+ if (++i > 200)
+ break;
}
}
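
The rewritten ast_dp.c paths replace open-coded status checks and encoded negative register numbers with bounded polling loops that back off between attempts and return ordinary errnos (-EBUSY on EDID timeout, -ENODEV when the DP MCU never starts). A hedged sketch of that polling idiom; the callback-based helper below is illustrative, not part of the driver:

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int demo_wait_for_ready(u8 (*read_status)(void *ctx), void *ctx)
{
	unsigned int tries = 200;

	while (tries--) {
		if (read_status(ctx) & BIT(0))
			return 0;       /* status bit set, done */
		mdelay(1);              /* short delay between polls */
	}
	return -EBUSY;                  /* give up with a regular errno */
}
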
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index aae019e79bda..225817087b4d 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -391,6 +391,11 @@ static int ast_drm_freeze(struct drm_device *dev)
static int ast_drm_thaw(struct drm_device *dev)
{
+ struct ast_device *ast = to_ast_device(dev);
+
+ ast_enable_vga(ast->ioregs);
+ ast_open_key(ast->ioregs);
+ ast_enable_mmio(dev->dev, ast->ioregs);
ast_post_gpu(dev);
return drm_mode_config_helper_resume(dev);
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index ba3d86973995..d23b98ce4359 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -471,8 +471,10 @@ void ast_init_3rdtx(struct drm_device *dev);
/* aspeed DP */
bool ast_astdp_is_connected(struct ast_device *ast);
int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata);
-void ast_dp_launch(struct drm_device *dev);
+int ast_dp_launch(struct ast_device *ast);
+bool ast_dp_power_is_on(struct ast_device *ast);
void ast_dp_power_on_off(struct drm_device *dev, bool no);
+void ast_dp_link_training(struct ast_device *ast);
void ast_dp_set_on_off(struct drm_device *dev, bool no);
void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 0637abb70361..d836f2a4f9f3 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -115,8 +115,10 @@ static void ast_detect_tx_chip(struct ast_device *ast, bool need_post)
} else if (IS_AST_GEN7(ast)) {
if (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, TX_TYPE_MASK) ==
ASTDP_DPMCU_TX) {
- ast->tx_chip_types = AST_TX_ASTDP_BIT;
- ast_dp_launch(&ast->base);
+ int ret = ast_dp_launch(ast);
+
+ if (!ret)
+ ast->tx_chip_types = AST_TX_ASTDP_BIT;
}
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index dc8f639e82fd..ddb7696acc04 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -28,6 +28,7 @@
* Authors: Dave Airlie <[email protected]>
*/
+#include <linux/delay.h>
#include <linux/export.h>
#include <linux/pci.h>
@@ -1621,6 +1622,8 @@ static void ast_astdp_encoder_helper_atomic_enable(struct drm_encoder *encoder,
struct ast_device *ast = to_ast_device(dev);
ast_dp_power_on_off(dev, AST_DP_POWER_ON);
+ ast_dp_link_training(ast);
+
ast_wait_for_vretrace(ast);
ast_dp_set_on_off(dev, 1);
}
@@ -1687,11 +1690,35 @@ static int ast_astdp_connector_helper_detect_ctx(struct drm_connector *connector
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
+ struct drm_device *dev = connector->dev;
struct ast_device *ast = to_ast_device(connector->dev);
+ enum drm_connector_status status = connector_status_disconnected;
+ struct drm_connector_state *connector_state = connector->state;
+ bool is_active = false;
+
+ mutex_lock(&ast->modeset_lock);
+
+ if (connector_state && connector_state->crtc) {
+ struct drm_crtc_state *crtc_state = connector_state->crtc->state;
+
+ if (crtc_state && crtc_state->active)
+ is_active = true;
+ }
+
+ if (!is_active && !ast_dp_power_is_on(ast)) {
+ ast_dp_power_on_off(dev, true);
+ msleep(50);
+ }
if (ast_astdp_is_connected(ast))
- return connector_status_connected;
- return connector_status_disconnected;
+ status = connector_status_connected;
+
+ if (!is_active && status == connector_status_disconnected)
+ ast_dp_power_on_off(dev, false);
+
+ mutex_unlock(&ast->modeset_lock);
+
+ return status;
}
static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 22f548805dfb..65755798ab94 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -351,7 +351,7 @@ void ast_post_gpu(struct drm_device *dev)
if (IS_AST_GEN7(ast)) {
if (ast->tx_chip_types & AST_TX_ASTDP_BIT)
- ast_dp_launch(dev);
+ ast_dp_launch(ast);
} else if (ast->config_mode == ast_use_p2a) {
if (IS_AST_GEN6(ast))
ast_post_chip_2500(dev);
diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h
index 75671d345057..040961cc1a19 100644
--- a/drivers/gpu/drm/ast/ast_reg.h
+++ b/drivers/gpu/drm/ast/ast_reg.h
@@ -37,6 +37,12 @@
#define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
#define AST_IO_VGACRCB_HWC_ENABLED BIT(1)
+#define AST_IO_VGACRD1_MCU_FW_EXECUTING BIT(5)
+#define AST_IO_VGACRD7_EDID_VALID_FLAG BIT(0)
+#define AST_IO_VGACRDC_LINK_SUCCESS BIT(0)
+#define AST_IO_VGACRDF_HPD BIT(0)
+#define AST_IO_VGACRE5_EDID_READ_DONE BIT(0)
+
#define AST_IO_VGAIR1_R (0x5A)
#define AST_IO_VGAIR1_VREFRESH BIT(3)
@@ -67,18 +73,6 @@
#define AST_DP_VIDEO_ENABLE BIT(0)
/*
- * CRD1[b5]: DP MCU FW is executing
- * CRDC[b0]: DP link success
- * CRDF[b0]: DP HPD
- * CRE5[b0]: Host reading EDID process is done
- */
-#define ASTDP_MCU_FW_EXECUTING BIT(5)
-#define ASTDP_LINK_SUCCESS BIT(0)
-#define ASTDP_HPD BIT(0)
-#define ASTDP_HOST_EDID_READ_DONE BIT(0)
-#define ASTDP_HOST_EDID_READ_DONE_MASK GENMASK(0, 0)
-
-/*
* CRDF[b4]: Mirror of AST_DP_VIDEO_ENABLE
* Precondition: A. ~AST_DP_PHY_SLEEP &&
* B. DP_HPD &&
@@ -86,10 +80,6 @@
*/
#define ASTDP_MIRROR_VIDEO_ENABLE BIT(4)
-#define ASTDP_EDID_READ_POINTER_MASK GENMASK(7, 0)
-#define ASTDP_EDID_VALID_FLAG_MASK GENMASK(0, 0)
-#define ASTDP_EDID_READ_DATA_MASK GENMASK(7, 0)
-
/*
* ASTDP setmode registers:
* CRE0[7:0]: MISC0 ((0x00: 18-bpp) or (0x20: 24-bpp)
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index ddf1e4424ffd..bfa88409a7ff 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -36,11 +36,6 @@
static const bool verify_fast_training;
-struct bridge_init {
- struct i2c_client *client;
- struct device_node *node;
-};
-
static void analogix_dp_init_dp(struct analogix_dp_device *dp)
{
analogix_dp_reset(dp);
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 1e1c06fdf206..87b8545fccc0 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -460,6 +460,8 @@ struct it6505 {
bool enable_drv_hold;
const struct drm_edid *cached_edid;
+
+ int irq;
};
struct it6505_step_train_para {
@@ -2624,6 +2626,8 @@ static int it6505_poweron(struct it6505 *it6505)
it6505_init(it6505);
it6505_lane_off(it6505);
+ enable_irq(it6505->irq);
+
return 0;
}
@@ -2640,6 +2644,8 @@ static int it6505_poweroff(struct it6505 *it6505)
return 0;
}
+ disable_irq_nosync(it6505->irq);
+
if (pdata->gpiod_reset)
gpiod_set_value_cansleep(pdata->gpiod_reset, 0);
@@ -3389,7 +3395,7 @@ static int it6505_i2c_probe(struct i2c_client *client)
struct it6505 *it6505;
struct device *dev = &client->dev;
struct extcon_dev *extcon;
- int err, intp_irq;
+ int err;
it6505 = devm_kzalloc(&client->dev, sizeof(*it6505), GFP_KERNEL);
if (!it6505)
@@ -3430,17 +3436,18 @@ static int it6505_i2c_probe(struct i2c_client *client)
it6505_parse_dt(it6505);
- intp_irq = client->irq;
+ it6505->irq = client->irq;
- if (!intp_irq) {
+ if (!it6505->irq) {
dev_err(dev, "Failed to get INTP IRQ");
err = -ENODEV;
return err;
}
- err = devm_request_threaded_irq(&client->dev, intp_irq, NULL,
+ err = devm_request_threaded_irq(&client->dev, it6505->irq, NULL,
it6505_int_threaded_handler,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT |
+ IRQF_NO_AUTOEN,
"it6505-intp", it6505);
if (err) {
dev_err(dev, "Failed to request INTP threaded IRQ: %d", err);
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index 4e802b54a1cb..4d1d40e1f1b4 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -23,6 +23,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -34,7 +35,7 @@
struct lt9611uxc {
struct device *dev;
struct drm_bridge bridge;
- struct drm_connector connector;
+ struct drm_bridge *next_bridge;
struct regmap *regmap;
/* Protects all accesses to registers by stopping the on-chip MCU */
@@ -120,11 +121,6 @@ static struct lt9611uxc *bridge_to_lt9611uxc(struct drm_bridge *bridge)
return container_of(bridge, struct lt9611uxc, bridge);
}
-static struct lt9611uxc *connector_to_lt9611uxc(struct drm_connector *connector)
-{
- return container_of(connector, struct lt9611uxc, connector);
-}
-
static void lt9611uxc_lock(struct lt9611uxc *lt9611uxc)
{
mutex_lock(&lt9611uxc->ocm_lock);
@@ -171,20 +167,14 @@ static void lt9611uxc_hpd_work(struct work_struct *work)
struct lt9611uxc *lt9611uxc = container_of(work, struct lt9611uxc, work);
bool connected;
- if (lt9611uxc->connector.dev) {
- if (lt9611uxc->connector.dev->mode_config.funcs)
- drm_kms_helper_hotplug_event(lt9611uxc->connector.dev);
- } else {
-
- mutex_lock(&lt9611uxc->ocm_lock);
- connected = lt9611uxc->hdmi_connected;
- mutex_unlock(&lt9611uxc->ocm_lock);
+ mutex_lock(&lt9611uxc->ocm_lock);
+ connected = lt9611uxc->hdmi_connected;
+ mutex_unlock(&lt9611uxc->ocm_lock);
- drm_bridge_hpd_notify(&lt9611uxc->bridge,
- connected ?
- connector_status_connected :
- connector_status_disconnected);
- }
+ drm_bridge_hpd_notify(&lt9611uxc->bridge,
+ connected ?
+ connector_status_connected :
+ connector_status_disconnected);
}
static void lt9611uxc_reset(struct lt9611uxc *lt9611uxc)
@@ -289,82 +279,13 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc,
return dsi;
}
-static int lt9611uxc_connector_get_modes(struct drm_connector *connector)
-{
- struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector);
- const struct drm_edid *drm_edid;
- int count;
-
- drm_edid = drm_bridge_edid_read(&lt9611uxc->bridge, connector);
- drm_edid_connector_update(connector, drm_edid);
- count = drm_edid_connector_add_modes(connector);
- drm_edid_free(drm_edid);
-
- return count;
-}
-
-static enum drm_connector_status lt9611uxc_connector_detect(struct drm_connector *connector,
- bool force)
-{
- struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector);
-
- return lt9611uxc->bridge.funcs->detect(&lt9611uxc->bridge);
-}
-
-static enum drm_mode_status lt9611uxc_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct lt9611uxc_mode *lt9611uxc_mode = lt9611uxc_find_mode(mode);
-
- return lt9611uxc_mode ? MODE_OK : MODE_BAD;
-}
-
-static const struct drm_connector_helper_funcs lt9611uxc_bridge_connector_helper_funcs = {
- .get_modes = lt9611uxc_connector_get_modes,
- .mode_valid = lt9611uxc_connector_mode_valid,
-};
-
-static const struct drm_connector_funcs lt9611uxc_bridge_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .detect = lt9611uxc_connector_detect,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int lt9611uxc_connector_init(struct drm_bridge *bridge, struct lt9611uxc *lt9611uxc)
-{
- int ret;
-
- lt9611uxc->connector.polled = DRM_CONNECTOR_POLL_HPD;
-
- drm_connector_helper_add(&lt9611uxc->connector,
- &lt9611uxc_bridge_connector_helper_funcs);
- ret = drm_connector_init(bridge->dev, &lt9611uxc->connector,
- &lt9611uxc_bridge_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
- if (ret) {
- DRM_ERROR("Failed to initialize connector with drm\n");
- return ret;
- }
-
- return drm_connector_attach_encoder(&lt9611uxc->connector, bridge->encoder);
-}
-
static int lt9611uxc_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
- int ret;
-
- if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
- ret = lt9611uxc_connector_init(bridge, lt9611uxc);
- if (ret < 0)
- return ret;
- }
- return 0;
+ return drm_bridge_attach(bridge->encoder, lt9611uxc->next_bridge,
+ bridge, flags);
}
static enum drm_mode_status
@@ -525,7 +446,7 @@ static int lt9611uxc_parse_dt(struct device *dev,
lt9611uxc->dsi1_node = of_graph_get_remote_node(dev->of_node, 1, -1);
- return 0;
+ return drm_of_find_panel_or_bridge(dev->of_node, 2, -1, NULL, &lt9611uxc->next_bridge);
}
static int lt9611uxc_gpio_init(struct lt9611uxc *lt9611uxc)
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index c4e9d96933dc..0fb02e4e7f4e 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -722,7 +722,12 @@ static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi,
static void dw_mipi_dsi_packet_handler_config(struct dw_mipi_dsi *dsi)
{
- dsi_write(dsi, DSI_PCKHDL_CFG, CRC_RX_EN | ECC_RX_EN | BTA_EN);
+ u32 val = CRC_RX_EN | ECC_RX_EN | BTA_EN | EOTP_TX_EN;
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
+ val &= ~EOTP_TX_EN;
+
+ dsi_write(dsi, DSI_PCKHDL_CFG, val);
}
static void dw_mipi_dsi_video_packet_config(struct dw_mipi_dsi *dsi,
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index b8b7a227addf..290e2532fab1 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -241,6 +241,10 @@
/* Link Training */
#define DP0_SRCCTRL 0x06a0
+#define DP0_SRCCTRL_PRE1 GENMASK(29, 28)
+#define DP0_SRCCTRL_SWG1 GENMASK(25, 24)
+#define DP0_SRCCTRL_PRE0 GENMASK(21, 20)
+#define DP0_SRCCTRL_SWG0 GENMASK(17, 16)
#define DP0_SRCCTRL_SCRMBLDIS BIT(13)
#define DP0_SRCCTRL_EN810B BIT(12)
#define DP0_SRCCTRL_NOTP (0 << 8)
@@ -278,6 +282,8 @@
#define AUDIFDATA6 0x0720 /* DP0 Audio Info Frame Bytes 27 to 24 */
#define DP1_SRCCTRL 0x07a0 /* DP1 Control Register */
+#define DP1_SRCCTRL_PRE GENMASK(21, 20)
+#define DP1_SRCCTRL_SWG GENMASK(17, 16)
/* PHY */
#define DP_PHY_CTRL 0x0800
@@ -369,6 +375,7 @@ struct tc_data {
u32 rev;
u8 assr;
+ u8 pre_emphasis[2];
struct gpio_desc *sd_gpio;
struct gpio_desc *reset_gpio;
@@ -1090,13 +1097,17 @@ static int tc_main_link_enable(struct tc_data *tc)
return ret;
}
- ret = regmap_write(tc->regmap, DP0_SRCCTRL, tc_srcctrl(tc));
+ ret = regmap_write(tc->regmap, DP0_SRCCTRL,
+ tc_srcctrl(tc) |
+ FIELD_PREP(DP0_SRCCTRL_PRE0, tc->pre_emphasis[0]) |
+ FIELD_PREP(DP0_SRCCTRL_PRE1, tc->pre_emphasis[1]));
if (ret)
return ret;
/* SSCG and BW27 on DP1 must be set to the same as on DP0 */
ret = regmap_write(tc->regmap, DP1_SRCCTRL,
(tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
- ((tc->link.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
+ ((tc->link.rate != 162000) ? DP0_SRCCTRL_BW27 : 0) |
+ FIELD_PREP(DP1_SRCCTRL_PRE, tc->pre_emphasis[1]));
if (ret)
return ret;
@@ -1188,8 +1199,10 @@ static int tc_main_link_enable(struct tc_data *tc)
goto err_dpcd_write;
/* Reset voltage-swing & pre-emphasis */
- tmp[0] = tmp[1] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 |
- DP_TRAIN_PRE_EMPH_LEVEL_0;
+ tmp[0] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 |
+ FIELD_PREP(DP_TRAIN_PRE_EMPHASIS_MASK, tc->pre_emphasis[0]);
+ tmp[1] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 |
+ FIELD_PREP(DP_TRAIN_PRE_EMPHASIS_MASK, tc->pre_emphasis[1]);
ret = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, tmp, 2);
if (ret < 0)
goto err_dpcd_write;
@@ -1213,7 +1226,9 @@ static int tc_main_link_enable(struct tc_data *tc)
ret = regmap_write(tc->regmap, DP0_SRCCTRL,
tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS |
DP0_SRCCTRL_AUTOCORRECT |
- DP0_SRCCTRL_TP1);
+ DP0_SRCCTRL_TP1 |
+ FIELD_PREP(DP0_SRCCTRL_PRE0, tc->pre_emphasis[0]) |
+ FIELD_PREP(DP0_SRCCTRL_PRE1, tc->pre_emphasis[1]));
if (ret)
return ret;
@@ -1248,7 +1263,9 @@ static int tc_main_link_enable(struct tc_data *tc)
ret = regmap_write(tc->regmap, DP0_SRCCTRL,
tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS |
DP0_SRCCTRL_AUTOCORRECT |
- DP0_SRCCTRL_TP2);
+ DP0_SRCCTRL_TP2 |
+ FIELD_PREP(DP0_SRCCTRL_PRE0, tc->pre_emphasis[0]) |
+ FIELD_PREP(DP0_SRCCTRL_PRE1, tc->pre_emphasis[1]));
if (ret)
return ret;
@@ -1274,7 +1291,9 @@ static int tc_main_link_enable(struct tc_data *tc)
/* Clear Training Pattern, set AutoCorrect Mode = 1 */
ret = regmap_write(tc->regmap, DP0_SRCCTRL, tc_srcctrl(tc) |
- DP0_SRCCTRL_AUTOCORRECT);
+ DP0_SRCCTRL_AUTOCORRECT |
+ FIELD_PREP(DP0_SRCCTRL_PRE0, tc->pre_emphasis[0]) |
+ FIELD_PREP(DP0_SRCCTRL_PRE1, tc->pre_emphasis[1]));
if (ret)
return ret;
@@ -2363,6 +2382,18 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc)
return -EINVAL;
}
mode |= BIT(endpoint.port);
+
+ if (endpoint.port == 2) {
+ of_property_read_u8_array(node, "toshiba,pre-emphasis",
+ tc->pre_emphasis,
+ ARRAY_SIZE(tc->pre_emphasis));
+
+ if (tc->pre_emphasis[0] < 0 || tc->pre_emphasis[0] > 2 ||
+ tc->pre_emphasis[1] < 0 || tc->pre_emphasis[1] > 2) {
+ dev_err(dev, "Incorrect Pre-Emphasis setting, use either 0=0dB 1=3.5dB 2=6dB\n");
+ return -EINVAL;
+ }
+ }
}
if (mode == mode_dpi_to_edp || mode == mode_dpi_to_dp) {
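
The new DP0_SRCCTRL_PRE*/SWG* and DP1_SRCCTRL_PRE definitions are two-bit fields described with GENMASK(), and the pre-emphasis values read from the "toshiba,pre-emphasis" property are inserted with FIELD_PREP() wherever DP0_SRCCTRL/DP1_SRCCTRL is written. A small hedged example of that GENMASK()/FIELD_PREP() combination; the register name and field below are invented:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_SRCCTRL_PRE0	GENMASK(21, 20)	/* two-bit pre-emphasis field */

static u32 demo_srcctrl(u32 base, u8 pre_emphasis)
{
	/* pre_emphasis: 0 = 0 dB, 1 = 3.5 dB, 2 = 6 dB */
	return base | FIELD_PREP(DEMO_SRCCTRL_PRE0, pre_emphasis);
}
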
diff --git a/drivers/gpu/drm/ci/arm64.config b/drivers/gpu/drm/ci/arm64.config
index 4140303d6260..66e70ced796f 100644
--- a/drivers/gpu/drm/ci/arm64.config
+++ b/drivers/gpu/drm/ci/arm64.config
@@ -187,6 +187,7 @@ CONFIG_MTK_DEVAPC=y
CONFIG_PWM_MTK_DISP=y
CONFIG_MTK_CMDQ=y
CONFIG_REGULATOR_DA9211=y
+CONFIG_DRM_ANALOGIX_ANX7625=y
# For nouveau. Note that DRM must be a module so that it's loaded after NFS is up to provide the firmware.
CONFIG_ARCH_TEGRA=y
diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml
index 80fb0f57ae46..6d2cefa7f15e 100644
--- a/drivers/gpu/drm/ci/gitlab-ci.yml
+++ b/drivers/gpu/drm/ci/gitlab-ci.yml
@@ -2,10 +2,10 @@ variables:
DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa
DRM_CI_COMMIT_SHA: &drm-ci-commit-sha e2b9c5a9e3e4f9b532067af8022eaef8d6fc6c00
- UPSTREAM_REPO: git://anongit.freedesktop.org/drm/drm
+ UPSTREAM_REPO: https://gitlab.freedesktop.org/drm/kernel.git
TARGET_BRANCH: drm-next
- IGT_VERSION: 0df7b9b97f9da0e364f5ee30fe331004b8c86b56
+ IGT_VERSION: f13702b8e4e847c56da3ef6f0969065d686049c5
DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/anholt/deqp-runner.git
DEQP_RUNNER_GIT_TAG: v0.15.0
@@ -121,8 +121,9 @@ stages:
- mediatek
- meson
- msm
+ - panfrost
+ - powervr
- rockchip
- - virtio-gpu
- software-driver
# YAML anchors for rule conditions
diff --git a/drivers/gpu/drm/ci/igt_runner.sh b/drivers/gpu/drm/ci/igt_runner.sh
index 79f41d7da772..f38836ec837c 100755
--- a/drivers/gpu/drm/ci/igt_runner.sh
+++ b/drivers/gpu/drm/ci/igt_runner.sh
@@ -20,16 +20,6 @@ cat /sys/kernel/debug/dri/*/state
set -e
case "$DRIVER_NAME" in
- rockchip|meson)
- export IGT_FORCE_DRIVER="panfrost"
- ;;
- mediatek)
- if [ "$GPU_VERSION" = "mt8173" ]; then
- export IGT_FORCE_DRIVER=${DRIVER_NAME}
- elif [ "$GPU_VERSION" = "mt8183" ]; then
- export IGT_FORCE_DRIVER="panfrost"
- fi
- ;;
amdgpu|vkms)
# Cannot use HWCI_KERNEL_MODULES as at that point we don't have the module in /lib
mv /install/modules/lib/modules/* /lib/modules/. || true
@@ -80,6 +70,7 @@ igt-runner \
--igt-folder /igt/libexec/igt-gpu-tools \
--caselist $TESTLIST \
--output /results \
+ -vvvv \
$IGT_SKIPS \
$IGT_FLAKES \
$IGT_FAILS \
diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
index ee908b66aad2..b22b2cf8f06f 100644
--- a/drivers/gpu/drm/ci/test.yml
+++ b/drivers/gpu/drm/ci/test.yml
@@ -160,33 +160,57 @@ msm:sdm845:
script:
- ./install/bare-metal/cros-servo.sh
-rockchip:rk3288:
- extends:
- - .lava-igt:arm32
+.rockchip-device:
+ variables:
+ DTB: ${DEVICE_TYPE}
+ BOOT_METHOD: depthcharge
+
+.rockchip-display:
stage: rockchip
variables:
DRIVER_NAME: rockchip
+
+.rk3288:
+ extends:
+ - .lava-igt:arm32
+ - .rockchip-device
+ variables:
DEVICE_TYPE: rk3288-veyron-jaq
- DTB: ${DEVICE_TYPE}
- BOOT_METHOD: depthcharge
- KERNEL_IMAGE_TYPE: "zimage"
GPU_VERSION: rk3288
+ KERNEL_IMAGE_TYPE: "zimage"
RUNNER_TAG: mesa-ci-x86-64-lava-rk3288-veyron-jaq
-rockchip:rk3399:
+.rk3399:
extends:
- .lava-igt:arm64
- stage: rockchip
+ - .rockchip-device
parallel: 2
variables:
- DRIVER_NAME: rockchip
DEVICE_TYPE: rk3399-gru-kevin
- DTB: ${DEVICE_TYPE}
- BOOT_METHOD: depthcharge
- KERNEL_IMAGE_TYPE: ""
GPU_VERSION: rk3399
+ KERNEL_IMAGE_TYPE: ""
RUNNER_TAG: mesa-ci-x86-64-lava-rk3399-gru-kevin
+rockchip:rk3288:
+ extends:
+ - .rk3288
+ - .rockchip-display
+
+panfrost:rk3288:
+ extends:
+ - .rk3288
+ - .panfrost-gpu
+
+rockchip:rk3399:
+ extends:
+ - .rk3399
+ - .rockchip-display
+
+panfrost:rk3399:
+ extends:
+ - .rk3399
+ - .panfrost-gpu
+
.i915:
extends:
- .lava-igt:x86_64
@@ -280,63 +304,114 @@ amdgpu:stoney:
GPU_VERSION: stoney
RUNNER_TAG: mesa-ci-x86-64-lava-hp-11A-G6-EE-grunt
-.mediatek:
+.mediatek-device:
extends:
- .lava-igt:arm64
stage: mediatek
variables:
- DRIVER_NAME: mediatek
DTB: ${DEVICE_TYPE}
BOOT_METHOD: depthcharge
KERNEL_IMAGE_TYPE: ""
-mediatek:mt8173:
+.mediatek-display:
+ stage: mediatek
+ variables:
+ DRIVER_NAME: mediatek
+
+.powervr-gpu:
+ stage: powervr
+ variables:
+ DRIVER_NAME: powervr
+
+.panfrost-gpu:
+ stage: panfrost
+ variables:
+ DRIVER_NAME: panfrost
+
+.mt8173:
extends:
- - .mediatek
+ - .mediatek-device
parallel: 4
variables:
DEVICE_TYPE: mt8173-elm-hana
GPU_VERSION: mt8173
RUNNER_TAG: mesa-ci-x86-64-lava-mt8173-elm-hana
-mediatek:mt8183:
+.mt8183:
extends:
- - .mediatek
+ - .mediatek-device
parallel: 3
variables:
DEVICE_TYPE: mt8183-kukui-jacuzzi-juniper-sku16
GPU_VERSION: mt8183
RUNNER_TAG: mesa-ci-x86-64-lava-mt8183-kukui-jacuzzi-juniper-sku16
+mediatek:mt8173:
+ extends:
+ - .mt8173
+ - .mediatek-display
+
+powervr:mt8173:
+ extends:
+ - .mt8173
+ - .powervr-gpu
+ rules:
+ # TODO: powervr driver was merged in linux kernel, but there's no mediatek support yet
+ # Remove the rule once mediatek support is added for powervr
+ - when: never
+
+mediatek:mt8183:
+ extends:
+ - .mt8183
+ - .mediatek-display
+
+panfrost:mt8183:
+ extends:
+ - .mt8183
+ - .panfrost-gpu
+
# drm-mtk doesn't even probe yet in mainline for mt8192
.mediatek:mt8192:
extends:
- - .mediatek
+ - .mediatek-device
parallel: 3
variables:
DEVICE_TYPE: mt8192-asurada-spherion-r0
GPU_VERSION: mt8192
RUNNER_TAG: mesa-ci-x86-64-lava-mt8192-asurada-spherion-r0
-.meson:
+.meson-device:
extends:
- .lava-igt:arm64
- stage: meson
variables:
- DRIVER_NAME: meson
DTB: ${DEVICE_TYPE}
BOOT_METHOD: u-boot
KERNEL_IMAGE_TYPE: "image"
-meson:g12b:
+.meson-display:
+ stage: meson
+ variables:
+ DRIVER_NAME: meson
+
+.g12b:
extends:
- - .meson
+ - .meson-device
parallel: 3
variables:
DEVICE_TYPE: meson-g12b-a311d-khadas-vim3
GPU_VERSION: g12b
RUNNER_TAG: mesa-ci-x86-64-lava-meson-g12b-a311d-khadas-vim3
+meson:g12b:
+ extends:
+ - .g12b
+ - .meson-display
+
+panfrost:g12b:
+ extends:
+ - .g12b
+ - .panfrost-gpu
+
virtio_gpu:none:
stage: software-driver
variables:
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
index e8c2f4044a92..8e2fed6d76a3 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
@@ -30,6 +30,7 @@ kms_cursor_crc@cursor-random-64x64,Fail
kms_cursor_crc@cursor-size-change,Fail
kms_cursor_crc@cursor-sliding-64x21,Fail
kms_cursor_crc@cursor-sliding-64x64,Fail
+kms_cursor_edge_walk@64x64-left-edge,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_lease@lease-uevent,Fail
@@ -37,4 +38,3 @@ kms_plane@pixel-format,Fail
kms_plane_cursor@primary,Fail
kms_rotation_crc@primary-rotation-180,Fail
perf@i915-ref-count,Fail
-tools_test@tools_test,Fail
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt
index ea512ff8c352..e4faa96fa000 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt
@@ -1,8 +1,20 @@
# Board Name: hp-11A-G6-EE-grunt
# Bug Report: https://lore.kernel.org/amd-gfx/[email protected]/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
kms_async_flips@async-flip-with-page-flip-events
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://lore.kernel.org/amd-gfx/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_async_flips@crc
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://lore.kernel.org/amd-gfx/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_plane@pixel-format-source-clamping
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
index 3a2ce45d3cb9..f41b3e112976 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-skips.txt
@@ -2,9 +2,9 @@
.*suspend.*
# Skip driver specific tests
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -13,6 +13,7 @@ panfrost_.*
gem_.*
i915_.*
xe_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
index 6641520ac587..9b84f68a5122 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
@@ -6,11 +6,11 @@ i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
-kms_async_flips@invalid-async-flip,Timeout
-kms_atomic_transition@modeset-transition-fencing,Timeout
kms_ccs@crc-primary-rotation-180-yf-tiled-ccs,Timeout
+kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout
kms_fb_coherency@memset-crc,Crash
-kms_flip@flip-vs-dpms-off-vs-modeset,Timeout
+kms_flip@busy-flip,Timeout
+kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
@@ -33,16 +33,20 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu,Timeout
kms_lease@lease-uevent,Fail
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout
-kms_pm_rpm@modeset-lpsp-stress,Timeout
+kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
kms_pm_rpm@modeset-stress-extra-wait,Timeout
kms_pm_rpm@universal-planes,Timeout
kms_pm_rpm@universal-planes-dpms,Timeout
+kms_prop_blob@invalid-set-prop,Fail
+kms_rotation_crc@primary-rotation-180,Timeout
+kms_vblank@query-forked-hang,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt
index 0a76547a103d..581f0da4d0f2 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt
@@ -1,9 +1,48 @@
# Board Name: asus-C433TA-AJ0005-rammus
# Bug Report: https://lore.kernel.org/intel-gfx/[email protected]/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
i915_hangman@engine-engine-error
+
+# Board Name: asus-C433TA-AJ0005-rammus
+# Bug Report: https://lore.kernel.org/intel-gfx/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
i915_hangman@gt-engine-hang
+
+# Board Name: asus-C433TA-AJ0005-rammus
+# Bug Report: https://lore.kernel.org/intel-gfx/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_async_flips@crc
+
+# Board Name: asus-C433TA-AJ0005-rammus
+# Bug Report: https://lore.kernel.org/intel-gfx/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_universal_plane@cursor-fb-leak
+
+# Board Name: asus-C433TA-AJ0005-rammus
+# Bug Report: https://lore.kernel.org/intel-gfx/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_sysfs_edid_timing
+
+# Board Name: asus-C433TA-AJ0005-rammus
+# Bug Report: https://lore.kernel.org/intel-gfx/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+i915_hangman@engine-engine-hang
+
+# Board Name: asus-C433TA-AJ0005-rammus
+# Bug Report: https://lore.kernel.org/intel-gfx/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_pm_rpm@modeset-lpsp-stress
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
index 5663ed0420a7..5186ba3dbbc6 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-skips.txt
@@ -5,9 +5,9 @@ kms_plane_scaling@invalid-parameters
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -19,6 +19,7 @@ gem_.*
i915_pm_rc6_residency.*
i915_suspend.*
kms_scaling_modes.*
+i915_pm_rpm.*
# Kernel panic
drm_fdinfo.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt
index cb010c153a6a..4663d4d13f35 100644
--- a/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt
@@ -1,6 +1,6 @@
# Board Name: asus-C523NA-A20057-coral
# Bug Report: https://lore.kernel.org/intel-gfx/[email protected]/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
kms_fb_coherency@memset-crc
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
index ab588e7a447c..4f50e0240ff4 100644
--- a/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-skips.txt
@@ -7,9 +7,9 @@ kms_3d
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
index 26cd62bbf30a..2723e2832797 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
@@ -9,11 +9,10 @@ i915_pipe_stress@stress-xrgb8888-ytiled,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
i915_pm_rpm@system-suspend-execbuf,Timeout
-kms_async_flips@invalid-async-flip,Timeout
-kms_atomic_transition@modeset-transition-fencing,Timeout
kms_ccs@crc-primary-rotation-180-yf-tiled-ccs,Timeout
kms_fb_coherency@memset-crc,Crash
-kms_flip@flip-vs-dpms-off-vs-modeset,Timeout
+kms_flip@busy-flip,Timeout
+kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
@@ -41,20 +40,25 @@ kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
-kms_plane_alpha_blend@constant-alpha-min,Fail
kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout
+kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
kms_pm_rpm@modeset-stress-extra-wait,Timeout
kms_pm_rpm@universal-planes,Timeout
kms_pm_rpm@universal-planes-dpms,Timeout
+kms_prop_blob@invalid-set-prop,Fail
+kms_psr2_sf@cursor-plane-update-sf,Fail
kms_psr2_sf@fbc-plane-move-sf-dmg-area,Timeout
kms_psr2_sf@overlay-plane-update-continuous-sf,Fail
kms_psr2_sf@overlay-plane-update-sf-dmg-area,Fail
+kms_psr2_sf@overlay-primary-update-sf-dmg-area,Fail
+kms_psr2_sf@plane-move-sf-dmg-area,Fail
kms_psr2_sf@primary-plane-update-sf-dmg-area,Fail
kms_psr2_sf@primary-plane-update-sf-dmg-area-big-fb,Fail
kms_psr2_su@page_flip-NV12,Fail
kms_psr2_su@page_flip-P010,Fail
-kms_psr@psr-sprite-render,Timeout
+kms_rotation_crc@primary-rotation-180,Timeout
kms_setmode@basic,Fail
+kms_vblank@query-forked-hang,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt
index bb560ff1e2cd..58a6001abb28 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt
@@ -1,6 +1,13 @@
# Board Name: asus-C436FA-Flip-hatch
# Bug Report: https://lore.kernel.org/intel-gfx/[email protected]/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
kms_plane_alpha_blend@constant-alpha-min
+
+# Board Name: asus-C436FA-Flip-hatch
+# Bug Report: https://lore.kernel.org/intel-gfx/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_atomic_transition@plane-all-modeset-transition-internal-panels
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
index 93b7736fffbb..9d753d97c9ab 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-skips.txt
@@ -3,9 +3,9 @@ kms_plane_scaling@invalid-parameters
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -19,6 +19,7 @@ i915_suspend.*
xe_module_load.*
api_intel_allocator.*
kms_cursor_legacy.*
+i915_pm_rpm.*
# Kernel panic
drm_fdinfo.*
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
index fca15b487929..4821c9adefd1 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
@@ -1,20 +1,16 @@
core_setmaster@master-drop-set-user,Fail
+core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
-kms_async_flips@invalid-async-flip,Timeout
-kms_atomic_transition@modeset-transition-fencing,Timeout
-kms_big_fb@linear-16bpp-rotate-0,Fail
-kms_big_fb@linear-16bpp-rotate-180,Fail
-kms_big_fb@linear-32bpp-rotate-0,Fail
-kms_big_fb@linear-32bpp-rotate-180,Fail
-kms_big_fb@linear-8bpp-rotate-0,Fail
-kms_big_fb@linear-8bpp-rotate-180,Fail
-kms_big_fb@linear-max-hw-stride-32bpp-rotate-0,Fail
+kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout
kms_dirtyfb@default-dirtyfb-ioctl,Fail
-kms_draw_crc@draw-method-render,Fail
-kms_flip@flip-vs-dpms-off-vs-modeset,Timeout
+kms_dirtyfb@drrs-dirtyfb-ioctl,Fail
+kms_dirtyfb@fbc-dirtyfb-ioctl,Fail
+kms_flip@blocking-wf_vblank,Fail
+kms_flip@busy-flip,Timeout
+kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail
kms_flip@wf_vblank-ts-check,Fail
kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
@@ -26,6 +22,7 @@ kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-downscaling,Fail
@@ -38,19 +35,24 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu,Timeout
kms_frontbuffer_tracking@fbc-tiling-linear,Fail
kms_frontbuffer_tracking@fbcdrrs-tiling-linear,Fail
kms_lease@lease-uevent,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout
+kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
kms_pm_rpm@legacy-planes,Timeout
kms_pm_rpm@legacy-planes-dpms,Timeout
kms_pm_rpm@modeset-stress-extra-wait,Timeout
kms_pm_rpm@universal-planes,Timeout
kms_pm_rpm@universal-planes-dpms,Timeout
+kms_prop_blob@invalid-set-prop,Fail
kms_rotation_crc@multiplane-rotation,Fail
kms_rotation_crc@multiplane-rotation-cropping-bottom,Fail
kms_rotation_crc@multiplane-rotation-cropping-top,Fail
+kms_rotation_crc@primary-rotation-180,Timeout
+kms_vblank@query-forked-hang,Timeout
perf@non-zero-reason,Timeout
sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt
index 58fc424f8a42..077886b76093 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-flakes.txt
@@ -1,7 +1,13 @@
# Board Name: hp-x360-12b-ca0010nr-n4020-octopus
# Bug Report: https://lore.kernel.org/intel-gfx/[email protected]/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
core_hotunplug@unplug-rescan
+
+# Board Name: hp-x360-12b-ca0010nr-n4020-octopus
+# Bug Report: https://lore.kernel.org/intel-gfx/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_fb_coherency@memset-crc
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
index b3226b2d9ba1..9c64146aed90 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-skips.txt
@@ -6,9 +6,9 @@ kms_plane_scaling@invalid-parameters
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
index d4fba4f55ec1..1de04a3308c4 100644
--- a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
@@ -17,10 +17,12 @@ perf@i915-ref-count,Fail
perf_pmu@busy-accuracy-50,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
+prime_busy@after,Fail
sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
+testdisplay,Timeout
xe_module_load@force-load,Fail
xe_module_load@load,Fail
xe_module_load@many-reload,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt
index 6cf1fed2e575..549501e40461 100644
--- a/drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-flakes.txt
@@ -1,6 +1,6 @@
# Board Name: hp-x360-14-G1-sona
# Bug Report: https://lore.kernel.org/intel-gfx/[email protected]/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
prime_busy@hang
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
index f0cf8a6dda25..6ec2f83ffe13 100644
--- a/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-skips.txt
@@ -6,9 +6,9 @@ kms_plane_scaling@invalid-parameters
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
index 9a50e894c3e7..e728ccc62326 100644
--- a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
@@ -1,34 +1,39 @@
-api_intel_bb@blit-noreloc-keep-cache,Timeout
+api_intel_allocator@simple-allocator,Timeout
+api_intel_bb@object-reloc-keep-cache,Timeout
api_intel_bb@offset-control,Timeout
-api_intel_bb@render-ccs,Timeout
-core_getclient,Timeout
-core_hotunplug@hotreplug-lateclose,Timeout
-drm_read@short-buffer-block,Timeout
+core_auth@getclient-simple,Timeout
+core_hotunplug@hotunbind-rebind,Timeout
+debugfs_test@read_all_entries_display_on,Timeout
+drm_read@invalid-buffer,Timeout
drm_read@short-buffer-nonblock,Timeout
-dumb_buffer@map-uaf,Timeout
gen3_render_tiledx_blits,Timeout
gen7_exec_parse@basic-allocation,Timeout
gen7_exec_parse@batch-without-end,Timeout
gen9_exec_parse@batch-invalid-length,Timeout
gen9_exec_parse@bb-secure,Timeout
+gen9_exec_parse@secure-batches,Timeout
+gen9_exec_parse@shadow-peek,Timeout
+gen9_exec_parse@unaligned-jump,Timeout
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
-i915_pciid,Timeout
i915_query@engine-info,Timeout
+i915_query@query-topology-kernel-writes,Timeout
+i915_query@test-query-geometry-subslices,Timeout
kms_lease@lease-uevent,Fail
kms_rotation_crc@multiplane-rotation,Fail
perf@i915-ref-count,Fail
-perf_pmu@busy,Timeout
perf_pmu@enable-race,Timeout
perf_pmu@event-wait,Timeout
perf_pmu@gt-awake,Timeout
+perf_pmu@interrupts,Timeout
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
prime_mmap@test_map_unmap,Timeout
+prime_mmap@test_refcounting,Timeout
prime_self_import@basic-with_one_bo,Timeout
-syncobj_basic@bad-destroy,Timeout
+syncobj_basic@bad-flags-fd-to-handle,Timeout
syncobj_eventfd@invalid-bad-pad,Timeout
syncobj_wait@invalid-multi-wait-unsubmitted-signaled,Timeout
syncobj_wait@invalid-signal-illegal-handle,Timeout
@@ -37,7 +42,9 @@ syncobj_wait@multi-wait-all-submitted,Timeout
syncobj_wait@multi-wait-for-submit-submitted-signaled,Timeout
syncobj_wait@wait-any-complex,Timeout
syncobj_wait@wait-delayed-signal,Timeout
+template@A,Timeout
xe_module_load@force-load,Fail
xe_module_load@load,Fail
+xe_module_load@many-reload,Fail
xe_module_load@reload,Fail
xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
index e600782ef96a..b47df5855e8d 100644
--- a/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-skips.txt
@@ -12,9 +12,9 @@ kms_plane_scaling@invalid-parameters
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
index 7582d313dd9b..2adae2175501 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
@@ -7,18 +7,10 @@ i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
i915_pm_rpm@system-suspend-execbuf,Timeout
-kms_async_flips@invalid-async-flip,Timeout
-kms_atomic_transition@modeset-transition-fencing,Timeout
-kms_big_fb@linear-16bpp-rotate-0,Fail
-kms_big_fb@linear-16bpp-rotate-180,Fail
-kms_big_fb@linear-32bpp-rotate-0,Fail
-kms_big_fb@linear-32bpp-rotate-180,Fail
-kms_big_fb@linear-8bpp-rotate-0,Fail
-kms_big_fb@linear-8bpp-rotate-180,Fail
-kms_big_fb@linear-max-hw-stride-32bpp-rotate-0,Fail
kms_ccs@crc-primary-rotation-180-yf-tiled-ccs,Timeout
+kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout
kms_dirtyfb@default-dirtyfb-ioctl,Fail
-kms_draw_crc@draw-method-render,Fail
+kms_dirtyfb@fbc-dirtyfb-ioctl,Fail
kms_fb_coherency@memset-crc,Crash
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
@@ -40,6 +32,7 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu,Timeout
kms_frontbuffer_tracking@fbc-tiling-linear,Fail
kms_lease@lease-uevent,Fail
kms_plane_alpha_blend@alpha-basic,Fail
@@ -47,9 +40,13 @@ kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout
+kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
kms_pm_rpm@modeset-stress-extra-wait,Timeout
kms_pm_rpm@universal-planes,Timeout
kms_pm_rpm@universal-planes-dpms,Timeout
+kms_prop_blob@invalid-set-prop,Fail
+kms_rotation_crc@primary-rotation-180,Timeout
+kms_vblank@query-forked-hang,Timeout
perf@i915-ref-count,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt
index 1167a58c7dd1..60b8d1c64e70 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-flakes.txt
@@ -1,6 +1,6 @@
# Board Name: dell-latitude-5400-8665U-sarien
# Bug Report: https://lore.kernel.org/intel-gfx/[email protected]/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
kms_pm_rpm@modeset-lpsp-stress
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
index 20bd91525f45..29bff8922ae1 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-skips.txt
@@ -3,9 +3,9 @@ kms_plane_scaling@invalid-parameters
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -17,6 +17,7 @@ gem_.*
i915_pm_rc6_residency.*
i915_suspend.*
kms_flip.*
+i915_pm_rpm.*
# Kernel panic
drm_fdinfo.*
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
index cc5e9c1c2d57..a14349a1967f 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
@@ -5,8 +5,15 @@ device_reset@unbind-reset-rebind,Fail
dumb_buffer@invalid-bpp,Fail
fbdev@eof,Fail
fbdev@read,Fail
-fbdev@unaligned-write,Fail
kms_3d,Fail
+kms_bw@connected-linear-tiling-1-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-1-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-1-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-2-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-2-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-2-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-2-displays-3840x2160p,Fail
kms_bw@linear-tiling-1-displays-1920x1080p,Fail
kms_bw@linear-tiling-1-displays-2160x1440p,Fail
kms_bw@linear-tiling-1-displays-2560x1440p,Fail
@@ -27,4 +34,3 @@ kms_properties@get_properties-sanity-atomic,Fail
kms_properties@plane-properties-atomic,Fail
kms_properties@plane-properties-legacy,Fail
kms_rmfb@close-fd,Fail
-tools_test@tools_test,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
index 395ac0463404..2e5bf6ae25f2 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-flakes.txt
@@ -1,11 +1,41 @@
# Board Name: mt8173-elm-hana
# Bug Report: https://lore.kernel.org/linux-mediatek/[email protected]/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
core_setmaster_vs_auth
+
+# Board Name: mt8173-elm-hana
+# Bug Report: https://lore.kernel.org/linux-mediatek/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
dumb_buffer@create-clear
+
+# Board Name: mt8173-elm-hana
+# Bug Report: https://lore.kernel.org/linux-mediatek/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
fbdev@unaligned-write
+
+# Board Name: mt8173-elm-hana
+# Bug Report: https://lore.kernel.org/linux-mediatek/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
fbdev@write
+
+# Board Name: mt8173-elm-hana
+# Bug Report: https://lore.kernel.org/linux-mediatek/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@cursor-vs-flip-atomic-transitions
+
+# Board Name: mt8173-elm-hana
+# Bug Report: https://lore.kernel.org/linux-mediatek/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_prop_blob@invalid-set-prop
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
index 0c6108392140..8198e06344a3 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-skips.txt
@@ -1,8 +1,8 @@
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -10,6 +10,7 @@ panfrost_.*
# Skip intel specific tests
gem_.*
i915_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
index 9ef460646d76..8cb2cb67853d 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
@@ -1,8 +1,22 @@
-dumb_buffer@create-clear,Fail
-dumb_buffer@create-valid-dumb,Fail
+core_setmaster@master-drop-set-shared-fd,Fail
+device_reset@cold-reset-bound,Fail
+device_reset@reset-bound,Fail
+device_reset@unbind-cold-reset-rebind,Fail
+device_reset@unbind-reset-rebind,Fail
+dumb_buffer@create-clear,Crash
dumb_buffer@invalid-bpp,Fail
-dumb_buffer@map-invalid-size,Fail
-dumb_buffer@map-uaf,Fail
-dumb_buffer@map-valid,Fail
-panfrost_prime@gem-prime-import,Fail
-tools_test@tools_test,Fail
+fbdev@eof,Fail
+fbdev@pan,Fail
+fbdev@read,Fail
+fbdev@unaligned-read,Fail
+kms_bw@connected-linear-tiling-1-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-1-displays-2560x1440p,Fail
+kms_bw@linear-tiling-1-displays-1920x1080p,Fail
+kms_bw@linear-tiling-1-displays-3840x2160p,Fail
+kms_color@invalid-gamma-lut-sizes,Fail
+kms_flip@flip-vs-panning-vs-hang,Fail
+kms_flip@flip-vs-suspend,Fail
+kms_lease@lease-uevent,Fail
+kms_properties@plane-properties-atomic,Fail
+kms_rmfb@close-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
new file mode 100644
index 000000000000..df7e5ce7a036
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-flakes.txt
@@ -0,0 +1,20 @@
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/[email protected]/T/#u
+# Failure Rate: 100
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_bw@linear-tiling-1-displays-2560x1440p
+
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/[email protected]/T/#u
+# Failure Rate: 100
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_cursor_legacy@cursor-vs-flip-atomic-transitions
+
+# Board Name: mt8183-kukui-jacuzzi-juniper-sku16
+# Bug Report: https://lore.kernel.org/linux-mediatek/[email protected]/T/#u
+# Failure Rate: 100
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+fbdev@write
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
index 715b9a8f4997..8198e06344a3 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-skips.txt
@@ -1,7 +1,8 @@
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -9,9 +10,7 @@ nouveau_.*
# Skip intel specific tests
gem_.*
i915_.*
-
-# Panfrost is not a KMS driver, so skip the KMS tests
-kms_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt b/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt
index 9ef460646d76..328967d3e23d 100644
--- a/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt
@@ -1,8 +1,13 @@
-dumb_buffer@create-clear,Fail
-dumb_buffer@create-valid-dumb,Fail
dumb_buffer@invalid-bpp,Fail
-dumb_buffer@map-invalid-size,Fail
-dumb_buffer@map-uaf,Fail
-dumb_buffer@map-valid,Fail
-panfrost_prime@gem-prime-import,Fail
-tools_test@tools_test,Fail
+kms_3d,Fail
+kms_cursor_legacy@forked-bo,Fail
+kms_cursor_legacy@forked-move,Fail
+kms_cursor_legacy@single-bo,Fail
+kms_cursor_legacy@single-move,Fail
+kms_cursor_legacy@torture-bo,Fail
+kms_cursor_legacy@torture-move,Fail
+kms_lease@lease-uevent,Fail
+kms_properties@connector-properties-atomic,Fail
+kms_properties@connector-properties-legacy,Fail
+kms_properties@get_properties-sanity-atomic,Fail
+kms_properties@get_properties-sanity-non-atomic,Fail
diff --git a/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt b/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
index 715b9a8f4997..8198e06344a3 100644
--- a/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/meson-g12b-skips.txt
@@ -1,7 +1,8 @@
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -9,9 +10,7 @@ nouveau_.*
# Skip intel specific tests
gem_.*
i915_.*
-
-# Panfrost is not a KMS driver, so skip the KMS tests
-kms_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
index 6e7fd1ccd1e3..4ac46168eff3 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
@@ -4,12 +4,8 @@ device_reset@unbind-cold-reset-rebind,Fail
device_reset@unbind-reset-rebind,Fail
dumb_buffer@invalid-bpp,Fail
kms_3d,Fail
-kms_cursor_legacy@forked-move,Fail
-kms_cursor_legacy@single-bo,Fail
kms_cursor_legacy@torture-bo,Fail
-kms_cursor_legacy@torture-move,Fail
kms_force_connector_basic@force-edid,Fail
kms_hdmi_inject@inject-4k,Fail
kms_lease@lease-uevent,Fail
-msm_mapping@ring,Fail
-tools_test@tools_test,Fail
+msm/msm_mapping@ring,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
index ff12202abb6e..1674c8e214d6 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-skips.txt
@@ -1,7 +1,7 @@
# Skip driver specific tests
^amdgpu.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -9,6 +9,7 @@ panfrost_.*
# Skip intel specific tests
gem_.*
i915_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt
index 46ca69ce2ffe..bd0653caf7a0 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt
@@ -5,4 +5,3 @@ device_reset@unbind-reset-rebind,Fail
dumb_buffer@invalid-bpp,Fail
kms_3d,Fail
kms_lease@lease-uevent,Fail
-tools_test@tools_test,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt
index a275584c8bbb..123d92cb4470 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-flakes.txt
@@ -1,6 +1,6 @@
# Board Name: apq8096-db820c
# Bug Report: https://lore.kernel.org/linux-arm-msm/[email protected]/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
dumb_buffer@create-clear
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
index 1c45fc6c512d..5550be5486ed 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-skips.txt
@@ -4,7 +4,7 @@ kms_cursor_legacy@all-pipes-torture-move
# Skip driver specific tests
^amdgpu.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -12,6 +12,7 @@ panfrost_.*
# Skip intel specific tests
gem_.*
i915_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
@@ -23,4 +24,4 @@ core_hotunplug.*
# *** gpu fault: ttbr0=00000001030ea000 iova=0000000001074000 dir=WRITE type=PERMISSION source=1f030000 (0,0,0,0)
# msm_mdp 901000.display-controller: RBBM | ME master split | status=0x701000B0
# watchdog: BUG: soft lockup - CPU#0 stuck for 26s! [kworker/u16:3:46]
-msm_mapping@shadow
+msm/msm_mapping@shadow
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt
index eb7a3886d397..d42004cd6977 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt
@@ -3,13 +3,11 @@ device_reset@reset-bound,Fail
device_reset@unbind-cold-reset-rebind,Fail
device_reset@unbind-reset-rebind,Fail
dumb_buffer@invalid-bpp,Fail
-kms_atomic_transition@plane-primary-toggle-with-vblank-wait,Fail
kms_color@ctm-0-25,Fail
kms_color@ctm-0-50,Fail
kms_color@ctm-0-75,Fail
kms_color@ctm-blue-to-red,Fail
kms_color@ctm-green-to-red,Fail
-kms_color@ctm-max,Fail
kms_color@ctm-negative,Fail
kms_color@ctm-red-to-blue,Fail
kms_color@ctm-signed,Fail
@@ -21,72 +19,6 @@ kms_content_protection@lic-type-1,Crash
kms_content_protection@srm,Crash
kms_content_protection@type1,Crash
kms_content_protection@uevent,Crash
-kms_cursor_crc@cursor-alpha-opaque,Fail
-kms_cursor_crc@cursor-alpha-transparent,Fail
-kms_cursor_crc@cursor-dpms,Fail
-kms_cursor_crc@cursor-offscreen-128x128,Fail
-kms_cursor_crc@cursor-offscreen-128x42,Fail
-kms_cursor_crc@cursor-offscreen-256x256,Fail
-kms_cursor_crc@cursor-offscreen-256x85,Fail
-kms_cursor_crc@cursor-offscreen-32x10,Fail
-kms_cursor_crc@cursor-offscreen-32x32,Fail
-kms_cursor_crc@cursor-offscreen-512x170,Fail
-kms_cursor_crc@cursor-offscreen-512x512,Fail
-kms_cursor_crc@cursor-offscreen-64x21,Fail
-kms_cursor_crc@cursor-offscreen-64x64,Fail
-kms_cursor_crc@cursor-onscreen-128x128,Fail
-kms_cursor_crc@cursor-onscreen-128x42,Fail
-kms_cursor_crc@cursor-onscreen-256x256,Fail
-kms_cursor_crc@cursor-onscreen-256x85,Fail
-kms_cursor_crc@cursor-onscreen-32x10,Fail
-kms_cursor_crc@cursor-onscreen-32x32,Fail
-kms_cursor_crc@cursor-onscreen-512x170,Fail
-kms_cursor_crc@cursor-onscreen-512x512,Fail
-kms_cursor_crc@cursor-onscreen-64x21,Fail
-kms_cursor_crc@cursor-onscreen-64x64,Fail
-kms_cursor_crc@cursor-random-128x128,Fail
-kms_cursor_crc@cursor-random-128x42,Fail
-kms_cursor_crc@cursor-random-256x256,Fail
-kms_cursor_crc@cursor-random-256x85,Fail
-kms_cursor_crc@cursor-random-32x10,Fail
-kms_cursor_crc@cursor-random-32x32,Fail
-kms_cursor_crc@cursor-random-512x170,Fail
-kms_cursor_crc@cursor-random-512x512,Fail
-kms_cursor_crc@cursor-random-64x21,Fail
-kms_cursor_crc@cursor-random-64x64,Fail
-kms_cursor_crc@cursor-rapid-movement-128x128,Fail
-kms_cursor_crc@cursor-rapid-movement-128x42,Fail
-kms_cursor_crc@cursor-rapid-movement-256x256,Fail
-kms_cursor_crc@cursor-rapid-movement-256x85,Fail
-kms_cursor_crc@cursor-rapid-movement-32x10,Fail
-kms_cursor_crc@cursor-rapid-movement-32x32,Fail
-kms_cursor_crc@cursor-rapid-movement-512x170,Fail
-kms_cursor_crc@cursor-rapid-movement-512x512,Fail
-kms_cursor_crc@cursor-rapid-movement-64x21,Fail
-kms_cursor_crc@cursor-rapid-movement-64x64,Fail
-kms_cursor_crc@cursor-size-change,Fail
-kms_cursor_crc@cursor-sliding-128x128,Fail
-kms_cursor_crc@cursor-sliding-128x42,Fail
-kms_cursor_crc@cursor-sliding-256x256,Fail
-kms_cursor_crc@cursor-sliding-256x85,Fail
-kms_cursor_crc@cursor-sliding-32x10,Fail
-kms_cursor_crc@cursor-sliding-32x32,Fail
-kms_cursor_crc@cursor-sliding-512x170,Fail
-kms_cursor_crc@cursor-sliding-512x512,Fail
-kms_cursor_crc@cursor-sliding-64x21,Fail
-kms_cursor_crc@cursor-sliding-64x64,Fail
-kms_cursor_edge_walk@128x128-left-edge,Fail
-kms_cursor_edge_walk@128x128-right-edge,Fail
-kms_cursor_edge_walk@128x128-top-bottom,Fail
-kms_cursor_edge_walk@128x128-top-edge,Fail
-kms_cursor_edge_walk@256x256-left-edge,Fail
-kms_cursor_edge_walk@256x256-right-edge,Fail
-kms_cursor_edge_walk@256x256-top-bottom,Fail
-kms_cursor_edge_walk@256x256-top-edge,Fail
-kms_cursor_edge_walk@64x64-left-edge,Fail
-kms_cursor_edge_walk@64x64-right-edge,Fail
-kms_cursor_edge_walk@64x64-top-bottom,Fail
-kms_cursor_edge_walk@64x64-top-edge,Fail
kms_cursor_legacy@2x-cursor-vs-flip-atomic,Fail
kms_cursor_legacy@2x-cursor-vs-flip-legacy,Fail
kms_cursor_legacy@2x-flip-vs-cursor-atomic,Fail
@@ -100,92 +32,14 @@ kms_cursor_legacy@cursor-vs-flip-varying-size,Fail
kms_display_modes@extended-mode-basic,Fail
kms_flip@2x-flip-vs-modeset-vs-hang,Fail
kms_flip@2x-flip-vs-panning-vs-hang,Fail
-kms_flip@absolute-wf_vblank,Fail
-kms_flip@absolute-wf_vblank-interruptible,Fail
-kms_flip@basic-flip-vs-wf_vblank,Fail
-kms_flip@basic-plain-flip,Fail
-kms_flip@blocking-absolute-wf_vblank,Fail
-kms_flip@blocking-absolute-wf_vblank-interruptible,Fail
-kms_flip@blocking-wf_vblank,Fail
-kms_flip@busy-flip,Fail
-kms_flip@dpms-off-confusion,Fail
-kms_flip@dpms-off-confusion-interruptible,Fail
-kms_flip@dpms-vs-vblank-race,Fail
-kms_flip@dpms-vs-vblank-race-interruptible,Fail
-kms_flip@flip-vs-absolute-wf_vblank,Fail
-kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail
-kms_flip@flip-vs-blocking-wf-vblank,Fail
-kms_flip@flip-vs-expired-vblank,Fail
-kms_flip@flip-vs-expired-vblank-interruptible,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
-kms_flip@flip-vs-panning,Fail
-kms_flip@flip-vs-panning-interruptible,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
-kms_flip@flip-vs-rmfb,Fail
-kms_flip@flip-vs-rmfb-interruptible,Fail
-kms_flip@flip-vs-wf_vblank-interruptible,Fail
-kms_flip@modeset-vs-vblank-race,Fail
-kms_flip@modeset-vs-vblank-race-interruptible,Fail
-kms_flip@plain-flip-fb-recreate,Fail
-kms_flip@plain-flip-fb-recreate-interruptible,Fail
-kms_flip@plain-flip-interruptible,Fail
-kms_flip@plain-flip-ts-check,Fail
-kms_flip@plain-flip-ts-check-interruptible,Fail
-kms_flip@wf_vblank-ts-check,Fail
-kms_flip@wf_vblank-ts-check-interruptible,Fail
-kms_lease@cursor-implicit-plane,Fail
kms_lease@lease-uevent,Fail
-kms_lease@page-flip-implicit-plane,Fail
-kms_lease@setcrtc-implicit-plane,Fail
-kms_lease@simple-lease,Fail
kms_multipipe_modeset@basic-max-pipe-crc-check,Fail
kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
-kms_pipe_crc_basic@compare-crc-sanitycheck-xr24,Fail
-kms_pipe_crc_basic@disable-crc-after-crtc,Fail
-kms_pipe_crc_basic@nonblocking-crc,Fail
-kms_pipe_crc_basic@nonblocking-crc-frame-sequence,Fail
-kms_pipe_crc_basic@read-crc,Fail
-kms_pipe_crc_basic@read-crc-frame-sequence,Fail
-kms_plane@pixel-format,Fail
-kms_plane@pixel-format-source-clamping,Fail
-kms_plane@plane-panning-bottom-right,Fail
-kms_plane@plane-panning-top-left,Fail
-kms_plane@plane-position-covered,Fail
-kms_plane@plane-position-hole,Fail
-kms_plane@plane-position-hole-dpms,Fail
kms_plane_alpha_blend@alpha-7efc,Fail
-kms_plane_alpha_blend@alpha-basic,Fail
-kms_plane_alpha_blend@alpha-opaque-fb,Fail
-kms_plane_alpha_blend@alpha-transparent-fb,Fail
-kms_plane_alpha_blend@constant-alpha-max,Fail
-kms_plane_alpha_blend@constant-alpha-mid,Fail
-kms_plane_alpha_blend@constant-alpha-min,Fail
kms_plane_alpha_blend@coverage-7efc,Fail
kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
-kms_plane_cursor@primary,Fail
kms_plane_lowres@tiling-none,Fail
-kms_plane_multiple@tiling-none,Fail
kms_rmfb@close-fd,Fail
-kms_rotation_crc@cursor-rotation-180,Fail
-kms_rotation_crc@primary-rotation-180,Fail
-kms_sequence@get-busy,Fail
-kms_sequence@get-forked,Fail
-kms_sequence@get-forked-busy,Fail
-kms_sequence@get-idle,Fail
-kms_sequence@queue-busy,Fail
-kms_sequence@queue-idle,Fail
-kms_vblank@accuracy-idle,Fail
-kms_vblank@crtc-id,Fail
-kms_vblank@query-busy,Fail
-kms_vblank@query-forked,Fail
-kms_vblank@query-forked-busy,Fail
-kms_vblank@query-idle,Fail
kms_vblank@ts-continuation-dpms-rpm,Fail
-kms_vblank@ts-continuation-idle,Fail
-kms_vblank@ts-continuation-modeset,Fail
-kms_vblank@ts-continuation-modeset-rpm,Fail
-kms_vblank@wait-busy,Fail
-kms_vblank@wait-forked,Fail
-kms_vblank@wait-forked-busy,Fail
-kms_vblank@wait-idle,Fail
-tools_test@tools_test,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
index 6dec63d48cfb..d74e04405e65 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-flakes.txt
@@ -1,8 +1,20 @@
# Board Name: sc7180-trogdor-kingoftown
# Bug Report: https://lore.kernel.org/linux-arm-msm/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
+msm/msm_mapping@shadow
+
+# Board Name: sc7180-trogdor-kingoftown
+# Bug Report: https://lore.kernel.org/linux-arm-msm/[email protected]/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
+msm/msm_shrink@copy-gpu-oom-32
+
+# Board Name: sc7180-trogdor-kingoftown
+# Bug Report: https://lore.kernel.org/linux-arm-msm/[email protected]/T/#u
# Failure Rate: 50
-msm_mapping@shadow
-msm_shrink@copy-gpu-oom-32
-msm_shrink@copy-gpu-oom-8
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
+msm/msm_shrink@copy-gpu-oom-8
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
index 68c96005ba54..c2833eee1c4b 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt
@@ -4,7 +4,7 @@
# Skip driver specific tests
^amdgpu.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -12,6 +12,7 @@ panfrost_.*
# Skip intel specific tests
gem_.*
i915_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
@@ -19,3 +20,6 @@ core_hotunplug.*
# Timeout occurs
kms_flip@2x-wf_vblank-ts-check
+
+# Hangs the machine
+kms_cursor_crc@cursor-random-max-size
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt
index eb7a3886d397..d42004cd6977 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt
@@ -3,13 +3,11 @@ device_reset@reset-bound,Fail
device_reset@unbind-cold-reset-rebind,Fail
device_reset@unbind-reset-rebind,Fail
dumb_buffer@invalid-bpp,Fail
-kms_atomic_transition@plane-primary-toggle-with-vblank-wait,Fail
kms_color@ctm-0-25,Fail
kms_color@ctm-0-50,Fail
kms_color@ctm-0-75,Fail
kms_color@ctm-blue-to-red,Fail
kms_color@ctm-green-to-red,Fail
-kms_color@ctm-max,Fail
kms_color@ctm-negative,Fail
kms_color@ctm-red-to-blue,Fail
kms_color@ctm-signed,Fail
@@ -21,72 +19,6 @@ kms_content_protection@lic-type-1,Crash
kms_content_protection@srm,Crash
kms_content_protection@type1,Crash
kms_content_protection@uevent,Crash
-kms_cursor_crc@cursor-alpha-opaque,Fail
-kms_cursor_crc@cursor-alpha-transparent,Fail
-kms_cursor_crc@cursor-dpms,Fail
-kms_cursor_crc@cursor-offscreen-128x128,Fail
-kms_cursor_crc@cursor-offscreen-128x42,Fail
-kms_cursor_crc@cursor-offscreen-256x256,Fail
-kms_cursor_crc@cursor-offscreen-256x85,Fail
-kms_cursor_crc@cursor-offscreen-32x10,Fail
-kms_cursor_crc@cursor-offscreen-32x32,Fail
-kms_cursor_crc@cursor-offscreen-512x170,Fail
-kms_cursor_crc@cursor-offscreen-512x512,Fail
-kms_cursor_crc@cursor-offscreen-64x21,Fail
-kms_cursor_crc@cursor-offscreen-64x64,Fail
-kms_cursor_crc@cursor-onscreen-128x128,Fail
-kms_cursor_crc@cursor-onscreen-128x42,Fail
-kms_cursor_crc@cursor-onscreen-256x256,Fail
-kms_cursor_crc@cursor-onscreen-256x85,Fail
-kms_cursor_crc@cursor-onscreen-32x10,Fail
-kms_cursor_crc@cursor-onscreen-32x32,Fail
-kms_cursor_crc@cursor-onscreen-512x170,Fail
-kms_cursor_crc@cursor-onscreen-512x512,Fail
-kms_cursor_crc@cursor-onscreen-64x21,Fail
-kms_cursor_crc@cursor-onscreen-64x64,Fail
-kms_cursor_crc@cursor-random-128x128,Fail
-kms_cursor_crc@cursor-random-128x42,Fail
-kms_cursor_crc@cursor-random-256x256,Fail
-kms_cursor_crc@cursor-random-256x85,Fail
-kms_cursor_crc@cursor-random-32x10,Fail
-kms_cursor_crc@cursor-random-32x32,Fail
-kms_cursor_crc@cursor-random-512x170,Fail
-kms_cursor_crc@cursor-random-512x512,Fail
-kms_cursor_crc@cursor-random-64x21,Fail
-kms_cursor_crc@cursor-random-64x64,Fail
-kms_cursor_crc@cursor-rapid-movement-128x128,Fail
-kms_cursor_crc@cursor-rapid-movement-128x42,Fail
-kms_cursor_crc@cursor-rapid-movement-256x256,Fail
-kms_cursor_crc@cursor-rapid-movement-256x85,Fail
-kms_cursor_crc@cursor-rapid-movement-32x10,Fail
-kms_cursor_crc@cursor-rapid-movement-32x32,Fail
-kms_cursor_crc@cursor-rapid-movement-512x170,Fail
-kms_cursor_crc@cursor-rapid-movement-512x512,Fail
-kms_cursor_crc@cursor-rapid-movement-64x21,Fail
-kms_cursor_crc@cursor-rapid-movement-64x64,Fail
-kms_cursor_crc@cursor-size-change,Fail
-kms_cursor_crc@cursor-sliding-128x128,Fail
-kms_cursor_crc@cursor-sliding-128x42,Fail
-kms_cursor_crc@cursor-sliding-256x256,Fail
-kms_cursor_crc@cursor-sliding-256x85,Fail
-kms_cursor_crc@cursor-sliding-32x10,Fail
-kms_cursor_crc@cursor-sliding-32x32,Fail
-kms_cursor_crc@cursor-sliding-512x170,Fail
-kms_cursor_crc@cursor-sliding-512x512,Fail
-kms_cursor_crc@cursor-sliding-64x21,Fail
-kms_cursor_crc@cursor-sliding-64x64,Fail
-kms_cursor_edge_walk@128x128-left-edge,Fail
-kms_cursor_edge_walk@128x128-right-edge,Fail
-kms_cursor_edge_walk@128x128-top-bottom,Fail
-kms_cursor_edge_walk@128x128-top-edge,Fail
-kms_cursor_edge_walk@256x256-left-edge,Fail
-kms_cursor_edge_walk@256x256-right-edge,Fail
-kms_cursor_edge_walk@256x256-top-bottom,Fail
-kms_cursor_edge_walk@256x256-top-edge,Fail
-kms_cursor_edge_walk@64x64-left-edge,Fail
-kms_cursor_edge_walk@64x64-right-edge,Fail
-kms_cursor_edge_walk@64x64-top-bottom,Fail
-kms_cursor_edge_walk@64x64-top-edge,Fail
kms_cursor_legacy@2x-cursor-vs-flip-atomic,Fail
kms_cursor_legacy@2x-cursor-vs-flip-legacy,Fail
kms_cursor_legacy@2x-flip-vs-cursor-atomic,Fail
@@ -100,92 +32,14 @@ kms_cursor_legacy@cursor-vs-flip-varying-size,Fail
kms_display_modes@extended-mode-basic,Fail
kms_flip@2x-flip-vs-modeset-vs-hang,Fail
kms_flip@2x-flip-vs-panning-vs-hang,Fail
-kms_flip@absolute-wf_vblank,Fail
-kms_flip@absolute-wf_vblank-interruptible,Fail
-kms_flip@basic-flip-vs-wf_vblank,Fail
-kms_flip@basic-plain-flip,Fail
-kms_flip@blocking-absolute-wf_vblank,Fail
-kms_flip@blocking-absolute-wf_vblank-interruptible,Fail
-kms_flip@blocking-wf_vblank,Fail
-kms_flip@busy-flip,Fail
-kms_flip@dpms-off-confusion,Fail
-kms_flip@dpms-off-confusion-interruptible,Fail
-kms_flip@dpms-vs-vblank-race,Fail
-kms_flip@dpms-vs-vblank-race-interruptible,Fail
-kms_flip@flip-vs-absolute-wf_vblank,Fail
-kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail
-kms_flip@flip-vs-blocking-wf-vblank,Fail
-kms_flip@flip-vs-expired-vblank,Fail
-kms_flip@flip-vs-expired-vblank-interruptible,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
-kms_flip@flip-vs-panning,Fail
-kms_flip@flip-vs-panning-interruptible,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
-kms_flip@flip-vs-rmfb,Fail
-kms_flip@flip-vs-rmfb-interruptible,Fail
-kms_flip@flip-vs-wf_vblank-interruptible,Fail
-kms_flip@modeset-vs-vblank-race,Fail
-kms_flip@modeset-vs-vblank-race-interruptible,Fail
-kms_flip@plain-flip-fb-recreate,Fail
-kms_flip@plain-flip-fb-recreate-interruptible,Fail
-kms_flip@plain-flip-interruptible,Fail
-kms_flip@plain-flip-ts-check,Fail
-kms_flip@plain-flip-ts-check-interruptible,Fail
-kms_flip@wf_vblank-ts-check,Fail
-kms_flip@wf_vblank-ts-check-interruptible,Fail
-kms_lease@cursor-implicit-plane,Fail
kms_lease@lease-uevent,Fail
-kms_lease@page-flip-implicit-plane,Fail
-kms_lease@setcrtc-implicit-plane,Fail
-kms_lease@simple-lease,Fail
kms_multipipe_modeset@basic-max-pipe-crc-check,Fail
kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
-kms_pipe_crc_basic@compare-crc-sanitycheck-xr24,Fail
-kms_pipe_crc_basic@disable-crc-after-crtc,Fail
-kms_pipe_crc_basic@nonblocking-crc,Fail
-kms_pipe_crc_basic@nonblocking-crc-frame-sequence,Fail
-kms_pipe_crc_basic@read-crc,Fail
-kms_pipe_crc_basic@read-crc-frame-sequence,Fail
-kms_plane@pixel-format,Fail
-kms_plane@pixel-format-source-clamping,Fail
-kms_plane@plane-panning-bottom-right,Fail
-kms_plane@plane-panning-top-left,Fail
-kms_plane@plane-position-covered,Fail
-kms_plane@plane-position-hole,Fail
-kms_plane@plane-position-hole-dpms,Fail
kms_plane_alpha_blend@alpha-7efc,Fail
-kms_plane_alpha_blend@alpha-basic,Fail
-kms_plane_alpha_blend@alpha-opaque-fb,Fail
-kms_plane_alpha_blend@alpha-transparent-fb,Fail
-kms_plane_alpha_blend@constant-alpha-max,Fail
-kms_plane_alpha_blend@constant-alpha-mid,Fail
-kms_plane_alpha_blend@constant-alpha-min,Fail
kms_plane_alpha_blend@coverage-7efc,Fail
kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
-kms_plane_cursor@primary,Fail
kms_plane_lowres@tiling-none,Fail
-kms_plane_multiple@tiling-none,Fail
kms_rmfb@close-fd,Fail
-kms_rotation_crc@cursor-rotation-180,Fail
-kms_rotation_crc@primary-rotation-180,Fail
-kms_sequence@get-busy,Fail
-kms_sequence@get-forked,Fail
-kms_sequence@get-forked-busy,Fail
-kms_sequence@get-idle,Fail
-kms_sequence@queue-busy,Fail
-kms_sequence@queue-idle,Fail
-kms_vblank@accuracy-idle,Fail
-kms_vblank@crtc-id,Fail
-kms_vblank@query-busy,Fail
-kms_vblank@query-forked,Fail
-kms_vblank@query-forked-busy,Fail
-kms_vblank@query-idle,Fail
kms_vblank@ts-continuation-dpms-rpm,Fail
-kms_vblank@ts-continuation-idle,Fail
-kms_vblank@ts-continuation-modeset,Fail
-kms_vblank@ts-continuation-modeset-rpm,Fail
-kms_vblank@wait-busy,Fail
-kms_vblank@wait-forked,Fail
-kms_vblank@wait-forked-busy,Fail
-kms_vblank@wait-idle,Fail
-tools_test@tools_test,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
index dcb24b835dc3..cd3d3b0befe4 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-flakes.txt
@@ -1,6 +1,13 @@
# Board Name: sc7180-trogdor-lazor-limozeen-nots-r5
# Bug Report: https://lore.kernel.org/linux-arm-msm/[email protected]/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
-msm_mapping@shadow
+msm/msm_mapping@shadow
+
+# Board Name: sc7180-trogdor-lazor-limozeen-nots-r5
+# Bug Report: https://lore.kernel.org/linux-arm-msm/[email protected]/T/#u
+# Failure Rate: 100
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_lease@page-flip-implicit-plane
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
index 1168c53acd2d..7c69c1f1d55b 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt
@@ -4,7 +4,7 @@
# Skip driver specific tests
^amdgpu.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -12,6 +12,7 @@ panfrost_.*
# Skip intel specific tests
gem_.*
i915_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
index 8f010c8a9c4f..770a1c685fde 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
@@ -33,4 +33,3 @@ kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
kms_plane_cursor@overlay,Fail
kms_plane_cursor@viewport,Fail
kms_rmfb@close-fd,Fail
-tools_test@tools_test,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
index 2c5f62b07632..2aa96b1241c3 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
@@ -1,19 +1,118 @@
# Board Name: sdm845-cheza-r3
# Bug Report: https://lore.kernel.org/linux-arm-msm/[email protected]/T/#u
+# Failure Rate: 50
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
-# Failure Rate: 50
kms_cursor_legacy@basic-flip-after-cursor-atomic
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@basic-flip-after-cursor-legacy
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@basic-flip-after-cursor-varying-size
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@basic-flip-before-cursor-varying-size
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@flip-vs-cursor-atomic-transitions
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@flip-vs-cursor-varying-size
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@short-flip-after-cursor-atomic-transitions
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@short-flip-after-cursor-atomic-transitions-varying-size
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@short-flip-after-cursor-toggle
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@short-flip-before-cursor-atomic-transitions
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
kms_cursor_legacy@short-flip-before-cursor-atomic-transitions-varying-size
-msm_shrink@copy-gpu-32
-msm_shrink@copy-gpu-oom-32
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
+msm/msm_shrink@copy-gpu-32
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
+msm/msm_shrink@copy-gpu-oom-32
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_cursor_legacy@short-flip-before-cursor-toggle
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_cursor_legacy@flip-vs-cursor-toggle
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+msm/msm_shrink@copy-mmap-oom-8
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
index 5185212c8fb2..90651048ab61 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
@@ -4,12 +4,12 @@ kms_bw.*
# Failing due to a bootloader/fw issue. The workaround in mesa CI involves these two patches
# https://gitlab.freedesktop.org/gfx-ci/linux/-/commit/4b49f902ec6f2bb382cbbf489870573f4b43371e
# https://gitlab.freedesktop.org/gfx-ci/linux/-/commit/38cdf4c5559771e2474ae0fecef8469f65147bc1
-msm_mapping@*
+msm/msm_mapping@*
# Skip driver specific tests
^amdgpu.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -17,6 +17,7 @@ panfrost_.*
# Skip intel specific tests
gem_.*
i915_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt
new file mode 100644
index 000000000000..fe8ce2ce33e6
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt
@@ -0,0 +1 @@
+panfrost/panfrost_prime@gem-prime-import,Fail
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt
new file mode 100644
index 000000000000..3c7e494857b5
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-g12b-skips.txt
@@ -0,0 +1,23 @@
+# Skip driver specific tests
+^amdgpu.*
+^msm.*
+nouveau_.*
+^v3d.*
+^vc4.*
+^vmwgfx*
+
+# Skip intel specific tests
+gem_.*
+i915_.*
+tools_test.*
+
+# Panfrost is not a KMS driver, so skip the KMS tests
+kms_.*
+
+# Skip display functionality tests for GPU-only drivers
+dumb_buffer.*
+fbdev.*
+
+# Currently fails and causes coverage loss for other tests
+# since core_getversion also fails.
+core_hotunplug.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt
new file mode 100644
index 000000000000..fe8ce2ce33e6
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt
@@ -0,0 +1 @@
+panfrost/panfrost_prime@gem-prime-import,Fail
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt
new file mode 100644
index 000000000000..3c7e494857b5
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-skips.txt
@@ -0,0 +1,23 @@
+# Skip driver specific tests
+^amdgpu.*
+^msm.*
+nouveau_.*
+^v3d.*
+^vc4.*
+^vmwgfx*
+
+# Skip intel specific tests
+gem_.*
+i915_.*
+tools_test.*
+
+# Panfrost is not a KMS driver, so skip the KMS tests
+kms_.*
+
+# Skip display functionality tests for GPU-only drivers
+dumb_buffer.*
+fbdev.*
+
+# Currently fails and causes coverage loss for other tests
+# since core_getversion also fails.
+core_hotunplug.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt
new file mode 100644
index 000000000000..4a2f4b6b14c1
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt
@@ -0,0 +1 @@
+panfrost/panfrost_prime@gem-prime-import,Crash
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt
new file mode 100644
index 000000000000..feeed89b6c3f
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-skips.txt
@@ -0,0 +1,26 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
+
+# Skip driver specific tests
+^amdgpu.*
+^msm.*
+nouveau_.*
+^v3d.*
+^vc4.*
+^vmwgfx*
+
+# Skip intel specific tests
+gem_.*
+i915_.*
+tools_test.*
+
+# Panfrost is not a KMS driver, so skip the KMS tests
+kms_.*
+
+# Skip display functionality tests for GPU-only drivers
+dumb_buffer.*
+fbdev.*
+
+# Currently fails and causes coverage loss for other tests
+# since core_getversion also fails.
+core_hotunplug.*
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt
new file mode 100644
index 000000000000..fe8ce2ce33e6
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt
@@ -0,0 +1 @@
+panfrost/panfrost_prime@gem-prime-import,Fail
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-flakes.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-flakes.txt
new file mode 100644
index 000000000000..ac4f8f7244d4
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-flakes.txt
@@ -0,0 +1,6 @@
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/dri-devel/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-g0df7b9b97
+# Linux Version: 6.9.0-rc7
+panfrost/panfrost_submit@pan-unhandled-pagefault
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt
new file mode 100644
index 000000000000..feeed89b6c3f
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-skips.txt
@@ -0,0 +1,26 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
+
+# Skip driver specific tests
+^amdgpu.*
+^msm.*
+nouveau_.*
+^v3d.*
+^vc4.*
+^vmwgfx*
+
+# Skip intel specific tests
+gem_.*
+i915_.*
+tools_test.*
+
+# Panfrost is not a KMS driver, so skip the KMS tests
+kms_.*
+
+# Skip display functionality tests for GPU-only drivers
+dumb_buffer.*
+fbdev.*
+
+# Currently fails and causes coverage loss for other tests
+# since core_getversion also fails.
+core_hotunplug.*
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
index f9b99bf27105..ea7b2ceb95b9 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
@@ -1,8 +1,18 @@
+core_setmaster@master-drop-set-root,Crash
+core_setmaster@master-drop-set-user,Crash
+core_setmaster_vs_auth,Crash
+device_reset@cold-reset-bound,Crash
+device_reset@reset-bound,Crash
+device_reset@unbind-cold-reset-rebind,Crash
+device_reset@unbind-reset-rebind,Crash
dumb_buffer@create-clear,Crash
-dumb_buffer@create-valid-dumb,Crash
dumb_buffer@invalid-bpp,Crash
-dumb_buffer@map-invalid-size,Crash
-dumb_buffer@map-uaf,Crash
-dumb_buffer@map-valid,Crash
-panfrost_prime@gem-prime-import,Crash
-tools_test@tools_test,Crash
+fbdev@pan,Crash
+kms_cursor_crc@cursor-onscreen-32x10,Crash
+kms_cursor_crc@cursor-onscreen-32x32,Crash
+kms_cursor_crc@cursor-random-32x10,Crash
+kms_cursor_crc@cursor-sliding-32x32,Crash
+kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail
+kms_cursor_legacy@cursor-vs-flip-legacy,Fail
+kms_prop_blob@invalid-set-prop,Crash
+kms_prop_blob@invalid-set-prop-any,Crash
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt
new file mode 100644
index 000000000000..7ede273aab20
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt
@@ -0,0 +1,6 @@
+# Board Name: rk3288-veyron-jaq
+# Bug Report: https://lore.kernel.org/linux-rockchip/[email protected]/T/#u
+# Failure Rate: 100
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_cursor_legacy@flip-vs-cursor-atomic
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
index 6d3757dca83b..eb16b29dee48 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-skips.txt
@@ -1,60 +1,11 @@
# Suspend to RAM seems to be broken on this machine
.*suspend.*
-# Too unstable, machine ends up hanging after lots of Oopses
-kms_cursor_legacy.*
-
-# Started hanging the machine on Linux 5.19-rc2:
-#
-# [IGT] kms_plane_lowres: executing
-# [IGT] kms_plane_lowres: starting subtest pipe-F-tiling-y
-# [IGT] kms_plane_lowres: exiting, ret=77
-# Console: switching to colour frame buffer device 170x48
-# rockchip-drm display-subsystem: [drm] *ERROR* flip_done timed out
-# rockchip-drm display-subsystem: [drm] *ERROR* [CRTC:35:crtc-0] commit wait timed out
-# BUG: spinlock bad magic on CPU#3, kms_plane_lowre/482
-# 8<--- cut here ---
-# Unable to handle kernel paging request at virtual address 7812078e
-# [7812078e] *pgd=00000000
-# Internal error: Oops: 5 [#1] SMP ARM
-# Modules linked in:
-# CPU: 3 PID: 482 Comm: kms_plane_lowre Tainted: G W 5.19.0-rc2-323596-g00535de92171 #1
-# Hardware name: Rockchip (Device Tree)
-# Process kms_plane_lowre (pid: 482, stack limit = 0x1193ac2b)
-# spin_dump from do_raw_spin_lock+0xa4/0xe8
-# do_raw_spin_lock from wait_for_completion_timeout+0x2c/0x120
-# wait_for_completion_timeout from drm_crtc_commit_wait+0x18/0x7c
-# drm_crtc_commit_wait from drm_atomic_helper_wait_for_dependencies+0x44/0x168
-# drm_atomic_helper_wait_for_dependencies from commit_tail+0x34/0x180
-# commit_tail from drm_atomic_helper_commit+0x164/0x18c
-# drm_atomic_helper_commit from drm_atomic_commit+0xac/0xe4
-# drm_atomic_commit from drm_client_modeset_commit_atomic+0x23c/0x284
-# drm_client_modeset_commit_atomic from drm_client_modeset_commit_locked+0x60/0x1c8
-# drm_client_modeset_commit_locked from drm_client_modeset_commit+0x24/0x40
-# drm_client_modeset_commit from drm_fbdev_client_restore+0x58/0x94
-# drm_fbdev_client_restore from drm_client_dev_restore+0x70/0xbc
-# drm_client_dev_restore from drm_release+0xf4/0x114
-# drm_release from __fput+0x74/0x240
-# __fput from task_work_run+0x84/0xb4
-# task_work_run from do_exit+0x34c/0xa20
-# do_exit from do_group_exit+0x34/0x98
-# do_group_exit from __wake_up_parent+0x0/0x18
-# Code: e595c008 12843d19 03e00000 03093168 (15940508)
-# ---[ end trace 0000000000000000 ]---
-# note: kms_plane_lowre[482] exited with preempt_count 1
-# Fixing recursive fault but reboot is needed!
-kms_plane_lowres@pipe-F-tiling-y
-
-# Take too long, we have only two machines, and these are very flaky
-kms_cursor_crc.*
-
-# Machine is hanging in this test, so skip it
-kms_pipe_crc_basic@disable-crc-after-crtc
-
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -62,9 +13,7 @@ nouveau_.*
# Skip intel specific tests
gem_.*
i915_.*
-
-# Panfrost is not a KMS driver, so skip the KMS tests
-kms_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
index 9ef460646d76..9309ff15e23a 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
@@ -1,8 +1,84 @@
-dumb_buffer@create-clear,Fail
-dumb_buffer@create-valid-dumb,Fail
+device_reset@cold-reset-bound,Fail
+device_reset@reset-bound,Fail
+device_reset@unbind-cold-reset-rebind,Fail
+device_reset@unbind-reset-rebind,Fail
+dumb_buffer@create-clear,Crash
dumb_buffer@invalid-bpp,Fail
-dumb_buffer@map-invalid-size,Fail
-dumb_buffer@map-uaf,Fail
-dumb_buffer@map-valid,Fail
-panfrost_prime@gem-prime-import,Fail
-tools_test@tools_test,Fail
+kms_atomic_transition@modeset-transition,Fail
+kms_atomic_transition@modeset-transition-fencing,Fail
+kms_atomic_transition@plane-toggle-modeset-transition,Fail
+kms_color@gamma,Fail
+kms_color@legacy-gamma,Fail
+kms_cursor_crc@cursor-alpha-opaque,Fail
+kms_cursor_crc@cursor-alpha-transparent,Fail
+kms_cursor_crc@cursor-dpms,Fail
+kms_cursor_crc@cursor-offscreen-32x10,Fail
+kms_cursor_crc@cursor-offscreen-32x32,Fail
+kms_cursor_crc@cursor-offscreen-64x21,Fail
+kms_cursor_crc@cursor-offscreen-64x64,Fail
+kms_cursor_crc@cursor-onscreen-32x10,Fail
+kms_cursor_crc@cursor-onscreen-32x32,Fail
+kms_cursor_crc@cursor-onscreen-64x21,Fail
+kms_cursor_crc@cursor-onscreen-64x64,Fail
+kms_cursor_crc@cursor-random-32x10,Fail
+kms_cursor_crc@cursor-random-32x32,Fail
+kms_cursor_crc@cursor-random-64x21,Fail
+kms_cursor_crc@cursor-random-64x64,Fail
+kms_cursor_crc@cursor-rapid-movement-32x10,Fail
+kms_cursor_crc@cursor-rapid-movement-32x32,Fail
+kms_cursor_crc@cursor-rapid-movement-64x21,Fail
+kms_cursor_crc@cursor-rapid-movement-64x64,Fail
+kms_cursor_crc@cursor-size-change,Fail
+kms_cursor_crc@cursor-sliding-32x10,Fail
+kms_cursor_crc@cursor-sliding-32x32,Fail
+kms_cursor_crc@cursor-sliding-64x21,Fail
+kms_cursor_crc@cursor-sliding-64x64,Fail
+kms_cursor_edge_walk@64x64-left-edge,Fail
+kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail
+kms_cursor_legacy@basic-flip-before-cursor-legacy,Fail
+kms_cursor_legacy@cursor-vs-flip-atomic,Fail
+kms_cursor_legacy@cursor-vs-flip-legacy,Fail
+kms_cursor_legacy@cursor-vs-flip-toggle,Fail
+kms_cursor_legacy@flip-vs-cursor-atomic,Fail
+kms_cursor_legacy@flip-vs-cursor-crc-atomic,Fail
+kms_cursor_legacy@flip-vs-cursor-crc-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-legacy,Fail
+kms_cursor_legacy@long-nonblocking-modeset-vs-cursor-atomic,Fail
+kms_flip@basic-flip-vs-wf_vblank,Fail
+kms_flip@blocking-wf_vblank,Fail
+kms_flip@dpms-vs-vblank-race,Fail
+kms_flip@flip-vs-absolute-wf_vblank,Fail
+kms_flip@flip-vs-blocking-wf-vblank,Fail
+kms_flip@flip-vs-modeset-vs-hang,Fail
+kms_flip@flip-vs-panning,Fail
+kms_flip@flip-vs-panning-interruptible,Fail
+kms_flip@flip-vs-panning-vs-hang,Fail
+kms_flip@modeset-vs-vblank-race,Fail
+kms_flip@modeset-vs-vblank-race-interruptible,Fail
+kms_flip@plain-flip-fb-recreate,Fail
+kms_flip@plain-flip-fb-recreate-interruptible,Fail
+kms_flip@plain-flip-ts-check,Fail
+kms_flip@plain-flip-ts-check-interruptible,Fail
+kms_flip@wf_vblank-ts-check,Fail
+kms_flip@wf_vblank-ts-check-interruptible,Fail
+kms_invalid_mode@int-max-clock,Fail
+kms_lease@lease-uevent,Fail
+kms_lease@page-flip-implicit-plane,Fail
+kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
+kms_pipe_crc_basic@compare-crc-sanitycheck-xr24,Fail
+kms_pipe_crc_basic@disable-crc-after-crtc,Fail
+kms_pipe_crc_basic@nonblocking-crc,Fail
+kms_pipe_crc_basic@nonblocking-crc-frame-sequence,Fail
+kms_pipe_crc_basic@read-crc,Fail
+kms_pipe_crc_basic@read-crc-frame-sequence,Fail
+kms_plane@pixel-format,Crash
+kms_plane@pixel-format-source-clamping,Crash
+kms_plane@plane-panning-bottom-right,Fail
+kms_plane@plane-panning-top-left,Fail
+kms_plane@plane-position-covered,Fail
+kms_plane@plane-position-hole,Fail
+kms_plane@plane-position-hole-dpms,Fail
+kms_plane_cursor@primary,Fail
+kms_plane_multiple@tiling-none,Fail
+kms_rmfb@close-fd,Fail
+kms_universal_plane@universal-plane-functional,Fail
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
index 742c27d9a598..d98f6a17343c 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
@@ -1,6 +1,48 @@
# Board Name: rk3399-gru-kevin
-# Bug Report: https://lore.kernel.org/dri-devel/[email protected]/T/#u
-# IGT Version: 1.28-g0df7b9b97
-# Linux Version: 6.9.0-rc7
+# Bug Report: https://lore.kernel.org/linux-rockchip/[email protected]/T/#u
# Failure Rate: 50
-panfrost_submit@pan-unhandled-pagefault
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_bw@linear-tiling-1-displays-2560x1440p
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/linux-rockchip/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_cursor_legacy@nonblocking-modeset-vs-cursor-atomic
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/linux-rockchip/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_flip@dpms-vs-vblank-race-interruptible
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/linux-rockchip/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_flip@flip-vs-absolute-wf_vblank-interruptible
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/linux-rockchip/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_flip@flip-vs-wf_vblank-interruptible
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/linux-rockchip/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_setmode@basic
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/linux-rockchip/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_bw@connected-linear-tiling-1-displays-2560x1440p
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
index 5c52b25b4213..eb16b29dee48 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-skips.txt
@@ -1,13 +1,11 @@
# Suspend to RAM seems to be broken on this machine
.*suspend.*
-# Too unstable, machine ends up hanging after lots of Oopses
-kms_cursor_legacy.*
-
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -15,9 +13,7 @@ nouveau_.*
# Skip intel specific tests
gem_.*
i915_.*
-
-# Panfrost is not a KMS driver, so skip the KMS tests
-kms_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
index fdf09fe11566..c72fee70e739 100644
--- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
@@ -3,6 +3,70 @@ kms_addfb_basic@bo-too-small,Fail
kms_addfb_basic@size-max,Fail
kms_addfb_basic@too-high,Fail
kms_atomic_transition@plane-primary-toggle-with-vblank-wait,Fail
+kms_bw@connected-linear-tiling-1-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-1-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-1-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-1-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-10-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-10-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-10-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-10-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-11-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-11-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-11-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-11-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-12-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-12-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-12-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-12-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-13-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-13-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-13-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-13-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-14-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-14-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-14-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-14-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-15-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-15-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-15-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-15-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-16-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-16-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-16-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-16-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-2-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-2-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-2-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-2-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-3-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-3-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-3-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-3-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-4-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-4-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-4-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-4-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-5-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-5-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-5-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-5-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-6-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-6-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-6-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-6-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-7-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-7-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-7-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-7-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-8-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-8-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-8-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-8-displays-3840x2160p,Fail
+kms_bw@connected-linear-tiling-9-displays-1920x1080p,Fail
+kms_bw@connected-linear-tiling-9-displays-2160x1440p,Fail
+kms_bw@connected-linear-tiling-9-displays-2560x1440p,Fail
+kms_bw@connected-linear-tiling-9-displays-3840x2160p,Fail
kms_bw@linear-tiling-1-displays-1920x1080p,Fail
kms_bw@linear-tiling-1-displays-2160x1440p,Fail
kms_bw@linear-tiling-1-displays-2560x1440p,Fail
@@ -123,4 +187,3 @@ kms_vblank@wait-forked,Fail
kms_vblank@wait-forked-busy,Fail
kms_vblank@wait-idle,Fail
perf@i915-ref-count,Fail
-tools_test@tools_test,Fail
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
index e0ca4fadb84f..9c9e048725f8 100644
--- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-skips.txt
@@ -7,9 +7,9 @@ kms_flip@flip-vs-suspend.*
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -18,6 +18,7 @@ panfrost_.*
gem_.*
i915_.*
xe_.*
+tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt b/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt
index 691c383b21a0..5408110f4c60 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt
@@ -41,12 +41,8 @@ kms_cursor_legacy@flip-vs-cursor-crc-legacy,Fail
kms_cursor_legacy@flip-vs-cursor-legacy,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
-kms_flip@flip-vs-suspend,Timeout
-kms_flip@flip-vs-suspend-interruptible,Timeout
-kms_flip@plain-flip-fb-recreate,Fail
kms_lease@lease-uevent,Fail
kms_pipe_crc_basic@nonblocking-crc,Fail
-kms_pipe_crc_basic@nonblocking-crc-frame-sequence,Fail
kms_writeback@writeback-check-output,Fail
kms_writeback@writeback-check-output-XRGB2101010,Fail
kms_writeback@writeback-fb-id,Fail
@@ -54,4 +50,3 @@ kms_writeback@writeback-fb-id-XRGB2101010,Fail
kms_writeback@writeback-invalid-parameters,Fail
kms_writeback@writeback-pixel-formats,Fail
perf@i915-ref-count,Fail
-tools_test@tools_test,Fail
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt b/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
index eeaa1d5825af..62428f3c8f31 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-flakes.txt
@@ -67,3 +67,24 @@ kms_flip@flip-vs-absolute-wf_vblank-interruptible
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
kms_flip@flip-vs-blocking-wf-vblank
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_cursor_legacy@flip-vs-cursor-varying-size
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_flip@flip-vs-expired-vblank
+
+# Board Name: vkms
+# Bug Report: https://lore.kernel.org/dri-devel/[email protected]/T/#u
+# Failure Rate: 50
+# IGT Version: 1.28-gf13702b8e
+# Linux Version: 6.10.0-rc5
+kms_pipe_crc_basic@nonblocking-crc-frame-sequence
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
index fd5d1271115f..5ccc771fbb36 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
@@ -104,11 +104,112 @@ kms_cursor_crc@cursor-rapid-movement-256x85
# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
# CR2: 0000000000000078 CR3: 0000000109b38000 CR4: 0000000000350ef0
+kms_cursor_crc@cursor-onscreen-256x256
+# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
+# CPU: 1 PID: 1913 Comm: kworker/u8:6 Not tainted 6.10.0-rc5-g8a28e73ebead #1
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# Workqueue: vkms_composer vkms_composer_worker [vkms]
+# RIP: 0010:compose_active_planes+0x344/0x4e0 [vkms]
+# Code: 6a 34 0f 8e 91 fe ff ff 44 89 ea 48 8d 7c 24 48 e8 71 f0 ff ff 4b 8b 04 fc 48 8b 4c 24 50 48 8b 7c 24 40 48 8b 80 48 01 00 00 <48> 63 70 18 8b 40 20 48 89 f2 48 c1 e6 03 29 d0 48 8b 54 24 48 48
+# RSP: 0018:ffffb477409fbd58 EFLAGS: 00010282
+# RAX: 0000000000000000 RBX: 0000000000000002 RCX: ffff8b124a242000
+# RDX: 00000000000000ff RSI: ffff8b124a243ff8 RDI: ffff8b124a244000
+# RBP: 0000000000000002 R08: 0000000000000000 R09: 00000000000003ff
+# R10: ffff8b124a244000 R11: 0000000000000000 R12: ffff8b1249282f30
+# R13: 0000000000000002 R14: 0000000000000002 R15: 0000000000000000
+# FS: 0000000000000000(0000) GS:ffff8b126bd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 0000000000000018 CR3: 0000000107a86000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __die+0x1e/0x60
+# ? page_fault_oops+0x17b/0x4a0
+# ? exc_page_fault+0x6d/0x230
+# ? asm_exc_page_fault+0x26/0x30
+# ? compose_active_planes+0x344/0x4e0 [vkms]
+# ? compose_active_planes+0x32f/0x4e0 [vkms]
+# ? srso_return_thunk+0x5/0x5f
+# vkms_composer_worker+0x205/0x240 [vkms]
+# process_one_work+0x201/0x6c0
+# ? lock_is_held_type+0x9e/0x110
+# worker_thread+0x17e/0x350
+# ? __pfx_worker_thread+0x10/0x10
+# kthread+0xce/0x100
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork+0x2f/0x50
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork_asm+0x1a/0x30
+# </TASK>
+# Modules linked in: vkms
+# CR2: 0000000000000018
+# ---[ end trace 0000000000000000 ]---
+# RIP: 0010:compose_active_planes+0x344/0x4e0 [vkms]
+# Code: 6a 34 0f 8e 91 fe ff ff 44 89 ea 48 8d 7c 24 48 e8 71 f0 ff ff 4b 8b 04 fc 48 8b 4c 24 50 48 8b 7c 24 40 48 8b 80 48 01 00 00 <48> 63 70 18 8b 40 20 48 89 f2 48 c1 e6 03 29 d0 48 8b 54 24 48 48
+# RSP: 0018:ffffb477409fbd58 EFLAGS: 00010282
+# RAX: 0000000000000000 RBX: 0000000000000002 RCX: ffff8b124a242000
+# RDX: 00000000000000ff RSI: ffff8b124a243ff8 RDI: ffff8b124a244000
+# RBP: 0000000000000002 R08: 0000000000000000 R09: 00000000000003ff
+# R10: ffff8b124a244000 R11: 0000000000000000 R12: ffff8b1249282f30
+# R13: 0000000000000002 R14: 0000000000000002 R15: 0000000000000000
+# FS: 0000000000000000(0000) GS:ffff8b126bd00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 0000000000000018 CR3: 0000000107a86000 CR4: 0000000000350ef0
+
+kms_cursor_edge_walk@128x128-right-edge
+# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
+# CPU: 0 PID: 1911 Comm: kworker/u8:3 Not tainted 6.10.0-rc5-g5e7a002eefe5 #1
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# Workqueue: vkms_composer vkms_composer_worker [vkms]
+# RIP: 0010:compose_active_planes+0x344/0x4e0 [vkms]
+# Code: 6a 34 0f 8e 91 fe ff ff 44 89 ea 48 8d 7c 24 48 e8 71 f0 ff ff 4b 8b 04 fc 48 8b 4c 24 50 48 8b 7c 24 40 48 8b 80 48 01 00 00 <48> 63 70 18 8b 40 20 48 89 f2 48 c1 e6 03 29 d0 48 8b 54 24 48 48
+# RSP: 0018:ffffb2f040a43d58 EFLAGS: 00010282
+# RAX: 0000000000000000 RBX: 0000000000000002 RCX: ffffa2c181792000
+# RDX: 0000000000000000 RSI: ffffa2c181793ff8 RDI: ffffa2c181790000
+# RBP: 0000000000000031 R08: 0000000000000000 R09: 00000000000003ff
+# R10: ffffa2c181790000 R11: 0000000000000000 R12: ffffa2c1814fa810
+# R13: 0000000000000031 R14: 0000000000000031 R15: 0000000000000000
+# FS: 0000000000000000(0000) GS:ffffa2c1abc00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 0000000000000018 CR3: 0000000106768000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __die+0x1e/0x60
+# ? page_fault_oops+0x17b/0x4a0
+# ? srso_return_thunk+0x5/0x5f
+# ? mark_held_locks+0x49/0x80
+# ? exc_page_fault+0x6d/0x230
+# ? asm_exc_page_fault+0x26/0x30
+# ? compose_active_planes+0x344/0x4e0 [vkms]
+# ? compose_active_planes+0x32f/0x4e0 [vkms]
+# ? srso_return_thunk+0x5/0x5f
+# vkms_composer_worker+0x205/0x240 [vkms]
+# process_one_work+0x201/0x6c0
+# ? lock_is_held_type+0x9e/0x110
+# worker_thread+0x17e/0x350
+# ? __pfx_worker_thread+0x10/0x10
+# kthread+0xce/0x100
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork+0x2f/0x50
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork_asm+0x1a/0x30
+# </TASK>
+# Modules linked in: vkms
+# CR2: 0000000000000018
+# ---[ end trace 0000000000000000 ]---
+# RIP: 0010:compose_active_planes+0x344/0x4e0 [vkms]
+# Code: 6a 34 0f 8e 91 fe ff ff 44 89 ea 48 8d 7c 24 48 e8 71 f0 ff ff 4b 8b 04 fc 48 8b 4c 24 50 48 8b 7c 24 40 48 8b 80 48 01 00 00 <48> 63 70 18 8b 40 20 48 89 f2 48 c1 e6 03 29 d0 48 8b 54 24 48 48
+# RSP: 0018:ffffb2f040a43d58 EFLAGS: 00010282
+# RAX: 0000000000000000 RBX: 0000000000000002 RCX: ffffa2c181792000
+# RDX: 0000000000000000 RSI: ffffa2c181793ff8 RDI: ffffa2c181790000
+# RBP: 0000000000000031 R08: 0000000000000000 R09: 00000000000003ff
+# R10: ffffa2c181790000 R11: 0000000000000000 R12: ffffa2c1814fa810
+# R13: 0000000000000031 R14: 0000000000000031 R15: 000000000000
+
# Skip driver specific tests
^amdgpu.*
-msm_.*
+^msm.*
nouveau_.*
-panfrost_.*
+^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
@@ -117,3 +218,4 @@ panfrost_.*
gem_.*
i915_.*
xe_.*
+tools_test.*
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index d4c34f364140..6ee51003de3c 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -2328,6 +2328,31 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
#undef DEVICE_ID_ANY
#undef DEVICE_ID
+static int drm_dp_read_ident(struct drm_dp_aux *aux, unsigned int offset,
+ struct drm_dp_dpcd_ident *ident)
+{
+ int ret;
+
+ ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident));
+
+ return ret < 0 ? ret : 0;
+}
+
+static void drm_dp_dump_desc(struct drm_dp_aux *aux,
+ const char *device_name, const struct drm_dp_desc *desc)
+{
+ const struct drm_dp_dpcd_ident *ident = &desc->ident;
+
+ drm_dbg_kms(aux->drm_dev,
+ "%s: %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n",
+ aux->name, device_name,
+ (int)sizeof(ident->oui), ident->oui,
+ (int)strnlen(ident->device_id, sizeof(ident->device_id)), ident->device_id,
+ ident->hw_rev >> 4, ident->hw_rev & 0xf,
+ ident->sw_major_rev, ident->sw_minor_rev,
+ desc->quirks);
+}
+
/**
* drm_dp_read_desc - read sink/branch descriptor from DPCD
* @aux: DisplayPort AUX channel
@@ -2344,28 +2369,49 @@ int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
{
struct drm_dp_dpcd_ident *ident = &desc->ident;
unsigned int offset = is_branch ? DP_BRANCH_OUI : DP_SINK_OUI;
- int ret, dev_id_len;
+ int ret;
- ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident));
+ ret = drm_dp_read_ident(aux, offset, ident);
if (ret < 0)
return ret;
desc->quirks = drm_dp_get_quirks(ident, is_branch);
- dev_id_len = strnlen(ident->device_id, sizeof(ident->device_id));
-
- drm_dbg_kms(aux->drm_dev,
- "%s: DP %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n",
- aux->name, is_branch ? "branch" : "sink",
- (int)sizeof(ident->oui), ident->oui, dev_id_len,
- ident->device_id, ident->hw_rev >> 4, ident->hw_rev & 0xf,
- ident->sw_major_rev, ident->sw_minor_rev, desc->quirks);
+ drm_dp_dump_desc(aux, is_branch ? "DP branch" : "DP sink", desc);
return 0;
}
EXPORT_SYMBOL(drm_dp_read_desc);
/**
+ * drm_dp_dump_lttpr_desc - read and dump the DPCD descriptor for an LTTPR PHY
+ * @aux: DisplayPort AUX channel
+ * @dp_phy: LTTPR PHY instance
+ *
+ * Read the DPCD LTTPR PHY descriptor for @dp_phy and print a debug message
+ * with its details to dmesg.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_dump_lttpr_desc(struct drm_dp_aux *aux, enum drm_dp_phy dp_phy)
+{
+ struct drm_dp_desc desc = {};
+ int ret;
+
+ if (drm_WARN_ON(aux->drm_dev, dp_phy < DP_PHY_LTTPR1 || dp_phy > DP_MAX_LTTPR_COUNT))
+ return -EINVAL;
+
+ ret = drm_dp_read_ident(aux, DP_OUI_PHY_REPEATER(dp_phy), &desc.ident);
+ if (ret < 0)
+ return ret;
+
+ drm_dp_dump_desc(aux, drm_dp_phy_name(dp_phy), &desc);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_dump_lttpr_desc);
+
+/**
* drm_dp_dsc_sink_bpp_incr() - Get bits per pixel increment
* @dsc_dpcd: DSC capabilities from DPCD
*
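The new drm_dp_dump_lttpr_desc() helper reads the per-PHY LTTPR descriptor and logs it through the shared drm_dp_dump_desc() path. A minimal usage sketch, assuming a driver that has already read the LTTPR common capabilities and knows the repeater count; the wrapper name is illustrative and DP_PHY_LTTPR(i) is assumed to map index i to DP_PHY_LTTPR1 + i:

static void dump_all_lttpr_descs(struct drm_dp_aux *aux, int lttpr_count)
{
	int i;

	/* Best-effort debug dump; stop at the first AUX read failure. */
	for (i = 0; i < lttpr_count; i++)
		if (drm_dp_dump_lttpr_desc(aux, DP_PHY_LTTPR(i)) < 0)
			break;
}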
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index bcc5bbed9bd0..379a449a28a2 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -4992,7 +4992,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
for (i = 0x3; i < 0x8 && buf[i]; i++)
- seq_printf(m, "%c", buf[i]);
+ seq_putc(m, buf[i]);
seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
if (dump_dp_payload_table(mgr, buf))
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 22bbb2d83e30..7609c798d73d 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -1070,7 +1070,10 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
break;
}
- if (async_flip && prop != config->prop_fb_id) {
+ if (async_flip &&
+ prop != config->prop_fb_id &&
+ prop != config->prop_in_fence_fd &&
+ prop != config->prop_fb_damage_clips) {
ret = drm_atomic_plane_get_property(plane, plane_state,
prop, &old_val);
ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
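With this check relaxed, an atomic commit carrying the async page-flip flag may change IN_FENCE_FD and FB_DAMAGE_CLIPS on a plane in addition to FB_ID; any other plane property must still hold its current value. A hedged userspace sketch using libdrm, where the property IDs are assumed to have been looked up earlier via drmModeObjectGetProperties():

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Illustrative only: flip plane_id to new_fb asynchronously, passing an
 * explicit acquire fence. prop_fb_id and prop_in_fence_fd are the plane's
 * "FB_ID" and "IN_FENCE_FD" property IDs, resolved beforehand by the caller.
 */
static int async_flip_with_fence(int fd, uint32_t plane_id,
				 uint32_t prop_fb_id, uint32_t prop_in_fence_fd,
				 uint32_t new_fb, int acquire_fence)
{
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int ret;

	if (!req)
		return -ENOMEM;

	drmModeAtomicAddProperty(req, plane_id, prop_fb_id, new_fb);
	drmModeAtomicAddProperty(req, plane_id, prop_in_fence_fd, acquire_fence);

	ret = drmModeAtomicCommit(fd, req,
				  DRM_MODE_PAGE_FLIP_EVENT |
				  DRM_MODE_PAGE_FLIP_ASYNC, NULL);
	drmModeAtomicFree(req);
	return ret;
}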
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index d44f055dbe3e..c6af46dd02bf 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -353,8 +353,13 @@ err_reset_bridge:
bridge->encoder = NULL;
list_del(&bridge->chain_node);
- DRM_ERROR("failed to attach bridge %pOF to encoder %s: %d\n",
- bridge->of_node, encoder->name, ret);
+ if (ret != -EPROBE_DEFER)
+ DRM_ERROR("failed to attach bridge %pOF to encoder %s: %d\n",
+ bridge->of_node, encoder->name, ret);
+ else
+ dev_err_probe(encoder->dev->dev, -EPROBE_DEFER,
+ "failed to attach bridge %pOF to encoder %s\n",
+ bridge->of_node, encoder->name);
return ret;
}
diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c
index 0869b663f17e..a4fbf1eb7ac5 100644
--- a/drivers/gpu/drm/drm_bridge_connector.c
+++ b/drivers/gpu/drm/drm_bridge_connector.c
@@ -443,10 +443,8 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
panel_bridge = bridge;
}
- if (connector_type == DRM_MODE_CONNECTOR_Unknown) {
- kfree(bridge_connector);
+ if (connector_type == DRM_MODE_CONNECTOR_Unknown)
return ERR_PTR(-EINVAL);
- }
if (bridge_connector->bridge_hdmi)
ret = drmm_connector_hdmi_init(drm, connector,
@@ -461,10 +459,8 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
ret = drmm_connector_init(drm, connector,
&drm_bridge_connector_funcs,
connector_type, ddc);
- if (ret) {
- kfree(bridge_connector);
+ if (ret)
return ERR_PTR(ret);
- }
drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index 2803ac111bbd..bfedcbf516db 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -355,7 +355,7 @@ int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer,
err_drm_gem_vmap_unlocked:
drm_gem_unlock(gem);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(drm_client_buffer_vmap_local);
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index ab6ab7ff7ea8..80e239a64938 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -426,6 +426,8 @@ static void drm_connector_cleanup_action(struct drm_device *dev,
*
* The connector structure should be allocated with drmm_kzalloc().
*
+ * The @drm_connector_funcs.destroy hook must be NULL.
+ *
* Returns:
* Zero on success, error code on failure.
*/
@@ -474,6 +476,8 @@ EXPORT_SYMBOL(drmm_connector_init);
*
* The connector structure should be allocated with drmm_kzalloc().
*
+ * The @drm_connector_funcs.destroy hook must be NULL.
+ *
* Returns:
* Zero on success, error code on failure.
*/
@@ -2315,24 +2319,67 @@ EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
* DOC: standard connector properties
*
* Colorspace:
- * This property helps select a suitable colorspace based on the sink
- * capability. Modern sink devices support wider gamut like BT2020.
- * This helps switch to BT2020 mode if the BT2020 encoded video stream
- * is being played by the user, same for any other colorspace. Thereby
- * giving a good visual experience to users.
- *
- * The expectation from userspace is that it should parse the EDID
- * and get supported colorspaces. Use this property and switch to the
- * one supported. Sink supported colorspaces should be retrieved by
- * userspace from EDID and driver will not explicitly expose them.
- *
- * Basically the expectation from userspace is:
- * - Set up CRTC DEGAMMA/CTM/GAMMA to convert to some sink
- * colorspace
- * - Set this new property to let the sink know what it
- * converted the CRTC output to.
- * - This property is just to inform sink what colorspace
- * source is trying to drive.
+ * This property is used to inform the driver about the color encoding
+ * user space configured the pixel operation properties to produce.
+ * The variants set the colorimetry, transfer characteristics, and which
+ * YCbCr conversion should be used when necessary.
+ * The transfer characteristics from HDR_OUTPUT_METADATA takes precedence
+ * over this property.
+ * User space always configures the pixel operation properties to produce
+ * full quantization range data (see the Broadcast RGB property).
+ *
+ * Drivers inform the sink about what colorimetry, transfer
+ * characteristics, YCbCr conversion, and quantization range to expect
+ * (this can depend on the output mode, output format and other
+ * properties). Drivers also convert the user space provided data to what
+ * the sink expects.
+ *
+ * User space has to check if the sink supports all of the possible
+ * colorimetries that the driver is allowed to pick by parsing the EDID.
+ *
+ * For historical reasons this property exposes a number of variants which
+ * result in undefined behavior.
+ *
+ * Default:
+ * The behavior is driver-specific.
+ * BT2020_RGB:
+ * BT2020_YCC:
+ * User space configures the pixel operation properties to produce
+ * RGB content with Rec. ITU-R BT.2020 colorimetry, Rec.
+ * ITU-R BT.2020 (Table 4, RGB) transfer characteristics and full
+ * quantization range.
+ * User space can use the HDR_OUTPUT_METADATA property to set the
+ * transfer characteristics to PQ (Rec. ITU-R BT.2100 Table 4) or
+ * HLG (Rec. ITU-R BT.2100 Table 5) in which case, user space
+ * configures pixel operation properties to produce content with
+ * the respective transfer characteristics.
+ * User space has to make sure the sink supports Rec.
+ * ITU-R BT.2020 R'G'B' and Rec. ITU-R BT.2020 Y'C'BC'R
+ * colorimetry.
+ * Drivers can configure the sink to use an RGB format, tell the
+ * sink to expect Rec. ITU-R BT.2020 R'G'B' colorimetry and convert
+ * to the appropriate quantization range.
+ * Drivers can configure the sink to use a YCbCr format, tell the
+ * sink to expect Rec. ITU-R BT.2020 Y'C'BC'R colorimetry, convert
+ * to YCbCr using the Rec. ITU-R BT.2020 non-constant luminance
+ * conversion matrix and convert to the appropriate quantization
+ * range.
+ * The variants BT2020_RGB and BT2020_YCC are equivalent and the
+ * driver chooses between RGB and YCbCr on its own.
+ * SMPTE_170M_YCC:
+ * BT709_YCC:
+ * XVYCC_601:
+ * XVYCC_709:
+ * SYCC_601:
+ * opYCC_601:
+ * opRGB:
+ * BT2020_CYCC:
+ * DCI-P3_RGB_D65:
+ * DCI-P3_RGB_Theater:
+ * RGB_WIDE_FIXED:
+ * RGB_WIDE_FLOAT:
+ * BT601_YCC:
+ * The behavior is undefined.
*
* Because between HDMI and DP have different colorspaces,
* drm_mode_create_hdmi_colorspace_property() is used for HDMI connector and
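A hedged userspace-side sketch of the expectation described above: after confirming from the EDID that the sink supports BT.2020, userspace selects the matching enum value of the connector's "Colorspace" property. The legacy set-property path is used here only for brevity; an atomic commit would normally carry this alongside the pixel-operation properties.

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Illustrative only: select BT2020_RGB on a connector whose sink is known
 * (from its EDID) to support BT.2020 colorimetry. */
static int set_colorspace_bt2020_rgb(int fd, uint32_t connector_id)
{
	drmModeObjectProperties *props;
	int ret = -ENOENT;
	uint32_t i;
	int j;

	props = drmModeObjectGetProperties(fd, connector_id,
					   DRM_MODE_OBJECT_CONNECTOR);
	if (!props)
		return -errno;

	for (i = 0; i < props->count_props; i++) {
		drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

		if (!prop)
			continue;

		if (!strcmp(prop->name, "Colorspace")) {
			for (j = 0; j < prop->count_enums; j++) {
				if (strcmp(prop->enums[j].name, "BT2020_RGB"))
					continue;
				ret = drmModeObjectSetProperty(fd, connector_id,
							       DRM_MODE_OBJECT_CONNECTOR,
							       prop->prop_id,
							       prop->enums[j].value);
				break;
			}
		}
		drmModeFreeProperty(prop);
	}
	drmModeFreeObjectProperties(props);
	return ret;
}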
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 1f73b8d6d750..061436361998 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -315,4 +315,11 @@ drm_edid_load_firmware(struct drm_connector *connector)
}
#endif
+/* drm_panic.c */
+#ifdef CONFIG_DRM_PANIC
+bool drm_panic_is_enabled(struct drm_device *dev);
+#else
+static inline bool drm_panic_is_enabled(struct drm_device *dev) { return false; }
+#endif
+
#endif /* __DRM_CRTC_INTERNAL_H__ */
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 18565ec68451..3f7da78849e4 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -44,6 +44,7 @@
#include <drm/drm_vblank.h>
#include "drm_internal.h"
+#include "drm_crtc_internal.h"
static bool drm_fbdev_emulation = true;
module_param_named(fbdev_emulation, drm_fbdev_emulation, bool, 0600);
@@ -527,6 +528,7 @@ struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper)
fb_helper->info = info;
info->skip_vt_switch = true;
+ info->skip_panic = drm_panic_is_enabled(fb_helper->dev);
return info;
err_release:
@@ -624,6 +626,17 @@ static void drm_fb_helper_add_damage_clip(struct drm_fb_helper *helper, u32 x, u
static void drm_fb_helper_damage(struct drm_fb_helper *helper, u32 x, u32 y,
u32 width, u32 height)
{
+ /*
+ * This function may be invoked by panic() to flush the frame
+ * buffer, where all CPUs except the panic CPU are stopped.
+ * During the following schedule_work(), the panic CPU needs
+ * the worker_pool lock, which might be held by a stopped CPU,
+ * causing schedule_work() and panic() to block. Return early on
+ * oops_in_progress to prevent this blocking.
+ */
+ if (oops_in_progress)
+ return;
+
drm_fb_helper_add_damage_clip(helper, x, y, width, height);
schedule_work(&helper->damage_work);
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 568972258222..37d2e0a4ef4b 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -456,6 +456,8 @@ int drmm_mode_config_init(struct drm_device *dev)
if (ret == -EDEADLK)
ret = drm_modeset_backoff(&modeset_ctx);
+ might_fault();
+
ww_acquire_init(&resv_ctx, &reservation_ww_class);
ret = dma_resv_lock(&resv, &resv_ctx);
if (ret == -EDEADLK)
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index cfbe020de54e..19ab0a794add 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -161,6 +161,15 @@ int drm_panel_unprepare(struct drm_panel *panel)
if (!panel)
return -EINVAL;
+ /*
+ * If you are seeing the warning below it likely means one of two things:
+ * - Your panel driver incorrectly calls drm_panel_unprepare() in its
+ * shutdown routine. You should delete this.
+ * - You are using panel-edp or panel-simple and your DRM modeset
+ * driver's shutdown() callback happened after the panel's shutdown().
+ * In this case the warning is harmless though ideally you should
+ * figure out how to reverse the order of the shutdown() callbacks.
+ */
if (!panel->prepared) {
dev_warn(panel->dev, "Skipping unprepare of already unprepared panel\n");
return 0;
@@ -245,6 +254,15 @@ int drm_panel_disable(struct drm_panel *panel)
if (!panel)
return -EINVAL;
+ /*
+ * If you are seeing the warning below it likely means one of two things:
+ * - Your panel driver incorrectly calls drm_panel_disable() in its
+ * shutdown routine. You should delete this.
+ * - You are using panel-edp or panel-simple and your DRM modeset
+ * driver's shutdown() callback happened after the panel's shutdown().
+ * In this case the warning is harmless though ideally you should
+ * figure out how to reverse the order of the shutdown() callbacks.
+ */
if (!panel->enabled) {
dev_warn(panel->dev, "Skipping disable of already disabled panel\n");
return 0;
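The new comments point at the usual fix on the modeset-driver side: tear the pipeline down from the driver's own shutdown hook, so the panel is disabled and unprepared through the normal bridge/panel hooks before (or instead of) the panel driver's shutdown. A minimal sketch, assuming a platform-device-based KMS driver; the function name is illustrative:

static void example_kms_shutdown(struct platform_device *pdev)
{
	struct drm_device *drm = platform_get_drvdata(pdev);

	/* Nothing to do if the DRM device was never bound. */
	if (!drm)
		return;

	/* Disables all CRTCs, which in turn disables and unprepares the
	 * attached panel through the usual atomic helpers. */
	drm_atomic_helper_shutdown(drm);
}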
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 3f84d7527793..c16c7678237e 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -414,6 +414,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
},
.driver_data = (void *)&lcd1600x2560_leftside_up,
+ }, { /* OrangePi Neo */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "OrangePi"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "NEO-01"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Samsung GalaxyBook 10.6 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
index 948aed00595e..072752b658f0 100644
--- a/drivers/gpu/drm/drm_panic.c
+++ b/drivers/gpu/drm/drm_panic.c
@@ -27,6 +27,8 @@
#include <drm/drm_plane.h>
#include <drm/drm_print.h>
+#include "drm_crtc_internal.h"
+
MODULE_AUTHOR("Jocelyn Falempe");
MODULE_DESCRIPTION("DRM panic handler");
MODULE_LICENSE("GPL");
@@ -655,11 +657,11 @@ static struct drm_plane *to_drm_plane(struct kmsg_dumper *kd)
return container_of(kd, struct drm_plane, kmsg_panic);
}
-static void drm_panic(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
+static void drm_panic(struct kmsg_dumper *dumper, struct kmsg_dump_detail *detail)
{
struct drm_plane *plane = to_drm_plane(dumper);
- if (reason == KMSG_DUMP_PANIC)
+ if (detail->reason == KMSG_DUMP_PANIC)
draw_panic_plane(plane);
}
@@ -704,6 +706,26 @@ static void debugfs_register_plane(struct drm_plane *plane, int index) {}
#endif /* CONFIG_DRM_PANIC_DEBUG */
/**
+ * drm_panic_is_enabled
+ * @dev: the drm device that may support drm_panic
+ *
+ * Returns: true if the drm device supports drm_panic
+ */
+bool drm_panic_is_enabled(struct drm_device *dev)
+{
+ struct drm_plane *plane;
+
+ if (!dev->mode_config.num_total_plane)
+ return false;
+
+ drm_for_each_plane(plane, dev)
+ if (plane->helper_private && plane->helper_private->get_scanout_buffer)
+ return true;
+ return false;
+}
+EXPORT_SYMBOL(drm_panic_is_enabled);
+
+/**
* drm_panic_register() - Initialize DRM panic for a device
* @dev: the drm device on which the panic screen will be displayed.
*/
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index bb49d552e671..285290067056 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -888,7 +888,7 @@ EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
* disabled. Polling is re-enabled by calling drm_kms_helper_poll_enable().
*
* If however, the polling was never initialized, this call will trigger a
- * warning and return
+ * warning and return.
*
* Note that calls to enable and disable polling must be strictly ordered, which
* is automatically the case when they're only call from suspend/resume
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index cc3571e25a9a..c6b4cd77df72 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -131,7 +131,7 @@
* guaranteed to be enabled.
*
* On many hardware disabling the vblank interrupt cannot be done in a race-free
- * manner, see &drm_driver.vblank_disable_immediate and
+ * manner, see &drm_vblank_crtc_config.disable_immediate and
* &drm_driver.max_vblank_count. In that case the vblank core only disables the
* vblanks after a timer has expired, which can be configured through the
* ``vblankoffdelay`` module parameter.
@@ -1241,6 +1241,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_get);
void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
+ int vblank_offdelay = vblank->config.offdelay_ms;
if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return;
@@ -1250,13 +1251,13 @@ void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
/* Last user schedules interrupt disable */
if (atomic_dec_and_test(&vblank->refcount)) {
- if (drm_vblank_offdelay == 0)
+ if (!vblank_offdelay)
return;
- else if (drm_vblank_offdelay < 0)
+ else if (vblank_offdelay < 0)
vblank_disable_fn(&vblank->disable_timer);
- else if (!dev->vblank_disable_immediate)
+ else if (!vblank->config.disable_immediate)
mod_timer(&vblank->disable_timer,
- jiffies + ((drm_vblank_offdelay * HZ)/1000));
+ jiffies + ((vblank_offdelay * HZ) / 1000));
}
}
@@ -1265,7 +1266,8 @@ void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
* @crtc: which counter to give up
*
* Release ownership of a given vblank counter, turning off interrupts
- * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
+ * if possible. Disable interrupts after &drm_vblank_crtc_config.offdelay_ms
+ * milliseconds.
*/
void drm_crtc_vblank_put(struct drm_crtc *crtc)
{
@@ -1466,16 +1468,20 @@ void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_crtc_set_max_vblank_count);
/**
- * drm_crtc_vblank_on - enable vblank events on a CRTC
+ * drm_crtc_vblank_on_config - enable vblank events on a CRTC with custom
+ * configuration options
* @crtc: CRTC in question
+ * @config: Vblank configuration value
*
- * This functions restores the vblank interrupt state captured with
- * drm_crtc_vblank_off() again and is generally called when enabling @crtc. Note
- * that calls to drm_crtc_vblank_on() and drm_crtc_vblank_off() can be
- * unbalanced and so can also be unconditionally called in driver load code to
- * reflect the current hardware state of the crtc.
+ * See drm_crtc_vblank_on(). In addition, this function allows you to provide a
+ * custom vblank configuration for a given CRTC.
+ *
+ * Note that @config is copied, the pointer does not need to stay valid beyond
+ * this function call. For details of the parameters see
+ * struct drm_vblank_crtc_config.
*/
-void drm_crtc_vblank_on(struct drm_crtc *crtc)
+void drm_crtc_vblank_on_config(struct drm_crtc *crtc,
+ const struct drm_vblank_crtc_config *config)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = drm_crtc_index(crtc);
@@ -1488,6 +1494,8 @@ void drm_crtc_vblank_on(struct drm_crtc *crtc)
drm_dbg_vbl(dev, "crtc %d, vblank enabled %d, inmodeset %d\n",
pipe, vblank->enabled, vblank->inmodeset);
+ vblank->config = *config;
+
/* Drop our private "prevent drm_vblank_get" refcount */
if (vblank->inmodeset) {
atomic_dec(&vblank->refcount);
@@ -1500,10 +1508,33 @@ void drm_crtc_vblank_on(struct drm_crtc *crtc)
* re-enable interrupts if there are users left, or the
* user wishes vblank interrupts to be enabled all the time.
*/
- if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0)
+ if (atomic_read(&vblank->refcount) != 0 || !vblank->config.offdelay_ms)
drm_WARN_ON(dev, drm_vblank_enable(dev, pipe));
spin_unlock_irq(&dev->vbl_lock);
}
+EXPORT_SYMBOL(drm_crtc_vblank_on_config);
+
+/**
+ * drm_crtc_vblank_on - enable vblank events on a CRTC
+ * @crtc: CRTC in question
+ *
+ * This function restores the vblank interrupt state captured with
+ * drm_crtc_vblank_off() again and is generally called when enabling @crtc. Note
+ * that calls to drm_crtc_vblank_on() and drm_crtc_vblank_off() can be
+ * unbalanced and so can also be unconditionally called in driver load code to
+ * reflect the current hardware state of the crtc.
+ *
+ * Unlike drm_crtc_vblank_on_config(), this function uses the default vblank
+ * configuration values.
+ */
+void drm_crtc_vblank_on(struct drm_crtc *crtc)
+{
+ const struct drm_vblank_crtc_config config = {
+ .offdelay_ms = drm_vblank_offdelay,
+ .disable_immediate = crtc->dev->vblank_disable_immediate
+ };
+
+ drm_crtc_vblank_on_config(crtc, &config);
+}
EXPORT_SYMBOL(drm_crtc_vblank_on);
static void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
@@ -1556,16 +1587,21 @@ static void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
*
* Note that drivers must have race-free high-precision timestamping support,
* i.e. &drm_crtc_funcs.get_vblank_timestamp must be hooked up and
- * &drm_driver.vblank_disable_immediate must be set to indicate the
+ * &drm_vblank_crtc_config.disable_immediate must be set to indicate the
* time-stamping functions are race-free against vblank hardware counter
* increments.
*/
void drm_crtc_vblank_restore(struct drm_crtc *crtc)
{
- WARN_ON_ONCE(!crtc->funcs->get_vblank_timestamp);
- WARN_ON_ONCE(!crtc->dev->vblank_disable_immediate);
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = drm_crtc_index(crtc);
+ struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
+
+ drm_WARN_ON_ONCE(dev, !crtc->funcs->get_vblank_timestamp);
+ drm_WARN_ON_ONCE(dev, vblank->inmodeset);
+ drm_WARN_ON_ONCE(dev, !vblank->config.disable_immediate);
- drm_vblank_restore(crtc->dev, drm_crtc_index(crtc));
+ drm_vblank_restore(dev, pipe);
}
EXPORT_SYMBOL(drm_crtc_vblank_restore);
@@ -1754,7 +1790,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
/* If the counter is currently enabled and accurate, short-circuit
* queries to return the cached timestamp of the last vblank.
*/
- if (dev->vblank_disable_immediate &&
+ if (vblank->config.disable_immediate &&
drm_wait_vblank_is_query(vblwait) &&
READ_ONCE(vblank->enabled)) {
drm_wait_vblank_reply(dev, pipe, &vblwait->reply);
@@ -1918,8 +1954,8 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
* been signaled. The disable has to be last (after
* drm_handle_vblank_events) so that the timestamp is always accurate.
*/
- disable_irq = (dev->vblank_disable_immediate &&
- drm_vblank_offdelay > 0 &&
+ disable_irq = (vblank->config.disable_immediate &&
+ vblank->config.offdelay_ms > 0 &&
!atomic_read(&vblank->refcount));
drm_handle_vblank_events(dev, pipe);
@@ -1992,7 +2028,8 @@ int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
pipe = drm_crtc_index(crtc);
vblank = drm_crtc_vblank_crtc(crtc);
- vblank_enabled = dev->vblank_disable_immediate && READ_ONCE(vblank->enabled);
+ vblank_enabled = READ_ONCE(vblank->config.disable_immediate) &&
+ READ_ONCE(vblank->enabled);
if (!vblank_enabled) {
ret = drm_crtc_vblank_get(crtc);
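The new drm_crtc_vblank_on_config() lets a driver choose per-CRTC vblank parameters instead of the module-wide drm_vblank_offdelay / vblank_disable_immediate defaults that drm_crtc_vblank_on() still applies. A hedged sketch of an enable path that keeps the vblank interrupt permanently on for one CRTC (offdelay_ms == 0 never arms the disable timer, mirroring the existing drm_vblank_offdelay == 0 behaviour); the surrounding callback is illustrative:

static void example_crtc_atomic_enable(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	const struct drm_vblank_crtc_config config = {
		.offdelay_ms = 0,		/* never disable via the timer */
		.disable_immediate = false,
	};

	/* ... program the CRTC for the new mode ... */

	drm_crtc_vblank_on_config(crtc, &config);
}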
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 62dcfdc7894d..ab9ca4824b62 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -72,7 +72,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
drm_sched_resubmit_jobs(&gpu->sched);
- drm_sched_start(&gpu->sched, true);
+ drm_sched_start(&gpu->sched);
return DRM_GPU_SCHED_STAT_NOMINAL;
out_no_timeout:
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 3adc2c9ab72d..f3a4517bdf27 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -568,7 +568,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
dev->dev, "I2C bus registration failed.\n");
goto err_encoder_cleanup;
}
- gma_encoder->i2c_bus->slave_addr = 0x2C;
+ gma_encoder->i2c_bus->target_addr = 0x2C;
dev_priv->lvds_i2c_bus = gma_encoder->i2c_bus;
/*
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index 8245b5603d2c..d5924ca3ed05 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -14,8 +14,8 @@
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#define SLAVE_ADDR1 0x70
-#define SLAVE_ADDR2 0x72
+#define TARGET_ADDR1 0x70
+#define TARGET_ADDR2 0x72
static void *find_section(struct bdb_header *bdb, int section_id)
{
@@ -357,10 +357,10 @@ parse_sdvo_device_mapping(struct drm_psb_private *dev_priv,
/* skip the device block if device type is invalid */
continue;
}
- if (p_child->slave_addr != SLAVE_ADDR1 &&
- p_child->slave_addr != SLAVE_ADDR2) {
+ if (p_child->target_addr != TARGET_ADDR1 &&
+ p_child->target_addr != TARGET_ADDR2) {
/*
- * If the slave address is neither 0x70 nor 0x72,
+ * If the target address is neither 0x70 nor 0x72,
* it is not a SDVO device. Skip it.
*/
continue;
@@ -371,22 +371,22 @@ parse_sdvo_device_mapping(struct drm_psb_private *dev_priv,
DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
continue;
}
- DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
+ DRM_DEBUG_KMS("the SDVO device with target addr %2x is found on"
" %s port\n",
- p_child->slave_addr,
+ p_child->target_addr,
(p_child->dvo_port == DEVICE_PORT_DVOB) ?
"SDVOB" : "SDVOC");
p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
if (!p_mapping->initialized) {
p_mapping->dvo_port = p_child->dvo_port;
- p_mapping->slave_addr = p_child->slave_addr;
+ p_mapping->target_addr = p_child->target_addr;
p_mapping->dvo_wiring = p_child->dvo_wiring;
p_mapping->ddc_pin = p_child->ddc_pin;
p_mapping->i2c_pin = p_child->i2c_pin;
p_mapping->initialized = 1;
DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
p_mapping->dvo_port,
- p_mapping->slave_addr,
+ p_mapping->target_addr,
p_mapping->dvo_wiring,
p_mapping->ddc_pin,
p_mapping->i2c_pin);
@@ -394,10 +394,10 @@ parse_sdvo_device_mapping(struct drm_psb_private *dev_priv,
DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
"two SDVO device.\n");
}
- if (p_child->slave2_addr) {
+ if (p_child->target2_addr) {
/* Maybe this is a SDVO device with multiple inputs */
/* And the mapping info is not added */
- DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
+ DRM_DEBUG_KMS("there exists the target2_addr. Maybe this"
" is a SDVO device with multiple inputs.\n");
}
count++;
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index 0e6facf21e33..b5adea2a20c3 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -186,13 +186,13 @@ struct child_device_config {
u16 addin_offset;
u8 dvo_port; /* See Device_PORT_* above */
u8 i2c_pin;
- u8 slave_addr;
+ u8 target_addr;
u8 ddc_pin;
u16 edid_ptr;
u8 dvo_cfg; /* See DEVICE_CFG_* above */
u8 dvo2_port;
u8 i2c2_pin;
- u8 slave2_addr;
+ u8 target2_addr;
u8 ddc2_pin;
u8 capabilities;
u8 dvo_wiring;/* See DEVICE_WIRE_* above */
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
index aa45509859f2..ee8b047587f2 100644
--- a/drivers/gpu/drm/gma500/intel_gmbus.c
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -333,7 +333,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
clear_err:
/* Toggle the Software Clear Interrupt bit. This has the effect
* of resetting the GMBUS controller and so clearing the
- * BUS_ERROR raised by the slave's NAK.
+ * BUS_ERROR raised by the target's NAK.
*/
GMBUS_REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
GMBUS_REG_WRITE(GMBUS1 + reg_offset, 0);
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 83c17689c454..bddf89b82fec 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -202,7 +202,7 @@ struct psb_intel_opregion {
struct sdvo_device_mapping {
u8 initialized;
u8 dvo_port;
- u8 slave_addr;
+ u8 target_addr;
u8 dvo_wiring;
u8 i2c_pin;
u8 i2c_speed;
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index c111e933e1ed..2499fd6a80c9 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -80,7 +80,7 @@ struct psb_intel_mode_device {
struct gma_i2c_chan {
struct i2c_adapter base;
struct i2c_algo_bit_data algo;
- u8 slave_addr;
+ u8 target_addr;
/* for getting at dev. private (mmio etc.) */
struct drm_device *drm_dev;
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 8d1be94a443b..138f153d38ba 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -97,7 +97,7 @@ static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
struct i2c_msg msgs[] = {
{
- .addr = lvds_i2c_bus->slave_addr,
+ .addr = lvds_i2c_bus->target_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -710,7 +710,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
dev->dev, "I2C bus registration failed.\n");
goto err_encoder_cleanup;
}
- lvds_priv->i2c_bus->slave_addr = 0x2C;
+ lvds_priv->i2c_bus->target_addr = 0x2C;
dev_priv->lvds_i2c_bus = lvds_priv->i2c_bus;
/*
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index e4f914deceba..8dafff963ca8 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -70,7 +70,7 @@ struct psb_intel_sdvo {
struct gma_encoder base;
struct i2c_adapter *i2c;
- u8 slave_addr;
+ u8 target_addr;
struct i2c_adapter ddc;
@@ -259,13 +259,13 @@ static bool psb_intel_sdvo_read_byte(struct psb_intel_sdvo *psb_intel_sdvo, u8 a
{
struct i2c_msg msgs[] = {
{
- .addr = psb_intel_sdvo->slave_addr,
+ .addr = psb_intel_sdvo->target_addr,
.flags = 0,
.len = 1,
.buf = &addr,
},
{
- .addr = psb_intel_sdvo->slave_addr,
+ .addr = psb_intel_sdvo->target_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = ch,
@@ -463,14 +463,14 @@ static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 c
psb_intel_sdvo_debug_write(psb_intel_sdvo, cmd, args, args_len);
for (i = 0; i < args_len; i++) {
- msgs[i].addr = psb_intel_sdvo->slave_addr;
+ msgs[i].addr = psb_intel_sdvo->target_addr;
msgs[i].flags = 0;
msgs[i].len = 2;
msgs[i].buf = buf + 2 *i;
buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
buf[2*i + 1] = ((u8*)args)[i];
}
- msgs[i].addr = psb_intel_sdvo->slave_addr;
+ msgs[i].addr = psb_intel_sdvo->target_addr;
msgs[i].flags = 0;
msgs[i].len = 2;
msgs[i].buf = buf + 2*i;
@@ -479,12 +479,12 @@ static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 c
/* the following two are to read the response */
status = SDVO_I2C_CMD_STATUS;
- msgs[i+1].addr = psb_intel_sdvo->slave_addr;
+ msgs[i+1].addr = psb_intel_sdvo->target_addr;
msgs[i+1].flags = 0;
msgs[i+1].len = 1;
msgs[i+1].buf = &status;
- msgs[i+2].addr = psb_intel_sdvo->slave_addr;
+ msgs[i+2].addr = psb_intel_sdvo->target_addr;
msgs[i+2].flags = I2C_M_RD;
msgs[i+2].len = 1;
msgs[i+2].buf = &status;
@@ -1899,7 +1899,7 @@ psb_intel_sdvo_is_hdmi_connector(struct psb_intel_sdvo *psb_intel_sdvo, int devi
}
static u8
-psb_intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
+psb_intel_sdvo_get_target_addr(struct drm_device *dev, int sdvo_reg)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct sdvo_device_mapping *my_mapping, *other_mapping;
@@ -1913,14 +1913,14 @@ psb_intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
}
/* If the BIOS described our SDVO device, take advantage of it. */
- if (my_mapping->slave_addr)
- return my_mapping->slave_addr;
+ if (my_mapping->target_addr)
+ return my_mapping->target_addr;
/* If the BIOS only described a different SDVO device, use the
* address that it isn't using.
*/
- if (other_mapping->slave_addr) {
- if (other_mapping->slave_addr == 0x70)
+ if (other_mapping->target_addr) {
+ if (other_mapping->target_addr == 0x70)
return 0x72;
else
return 0x70;
@@ -2446,7 +2446,7 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
return false;
psb_intel_sdvo->sdvo_reg = sdvo_reg;
- psb_intel_sdvo->slave_addr = psb_intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
+ psb_intel_sdvo->target_addr = psb_intel_sdvo_get_target_addr(dev, sdvo_reg) >> 1;
psb_intel_sdvo_select_i2c_bus(dev_priv, psb_intel_sdvo, sdvo_reg);
if (!psb_intel_sdvo_init_ddc_proxy(psb_intel_sdvo, dev)) {
kfree(psb_intel_sdvo);
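
A note on the I2C transfers above: psb_intel_sdvo_read_byte() addresses the encoder at the renamed target_addr with a two-message transaction, first writing the register index and then reading the value back. A minimal, self-contained sketch of the same pattern (the helper name and error handling are illustrative, not part of the driver):

#include <linux/errno.h>
#include <linux/i2c.h>

/* Read one byte from register 'reg' of the I2C target at 'addr'. */
static int example_i2c_read_byte(struct i2c_adapter *adap, u16 addr,
				 u8 reg, u8 *val)
{
	struct i2c_msg msgs[] = {
		{ .addr = addr, .flags = 0, .len = 1, .buf = &reg },
		{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = val },
	};

	/* i2c_transfer() returns the number of messages transferred. */
	return i2c_transfer(adap, msgs, 2) == 2 ? 0 : -EIO;
}
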
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.h b/drivers/gpu/drm/i915/display/intel_display_wa.h
index 63201d09852c..be644ab6ae00 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.h
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.h
@@ -6,8 +6,16 @@
#ifndef __INTEL_DISPLAY_WA_H__
#define __INTEL_DISPLAY_WA_H__
+#include <linux/types.h>
+
struct drm_i915_private;
void intel_display_wa_apply(struct drm_i915_private *i915);
+#ifdef I915
+static inline bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915) { return false; }
+#else
+bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915);
+#endif
+
#endif
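
The hunk above follows a common pattern for display code shared between two drivers: when built as part of i915 (I915 defined), the new intel_display_needs_wa_16023588340() query collapses to a constant, while other builds declare it and link against a real implementation elsewhere. A minimal sketch of the same stub-versus-prototype arrangement, with purely hypothetical names:

#include <linux/types.h>

struct example_device;

#ifdef EXAMPLE_LEGACY_BUILD
/* Legacy build: the workaround never applies, so the compiler can
 * fold the check away entirely. */
static inline bool example_needs_workaround(struct example_device *edev)
{
	return false;
}
#else
/* Other builds provide the real check in a separate object file. */
bool example_needs_workaround(struct example_device *edev);
#endif
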
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 4c91a2b69a09..5eda258616ae 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -56,6 +56,7 @@
#include "intel_display_device.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
+#include "intel_display_wa.h"
#include "intel_fbc.h"
#include "intel_fbc_regs.h"
#include "intel_frontbuffer.h"
@@ -1309,6 +1310,11 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
+ if (intel_display_needs_wa_16023588340(i915)) {
+ plane_state->no_fbc_reason = "Wa_16023588340";
+ return 0;
+ }
+
/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
if (i915_vtd_active(i915) && (IS_SKYLAKE(i915) || IS_BROXTON(i915))) {
plane_state->no_fbc_reason = "VT-d enabled";
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 0b1cd4c7a525..025a79fe5920 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -2749,26 +2749,6 @@ oa_configure_all_contexts(struct i915_perf_stream *stream,
}
static int
-gen12_configure_all_contexts(struct i915_perf_stream *stream,
- const struct i915_oa_config *oa_config,
- struct i915_active *active)
-{
- struct flex regs[] = {
- {
- GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE),
- CTX_R_PWR_CLK_STATE,
- },
- };
-
- if (stream->engine->class != RENDER_CLASS)
- return 0;
-
- return oa_configure_all_contexts(stream,
- regs, ARRAY_SIZE(regs),
- active);
-}
-
-static int
lrc_configure_all_contexts(struct i915_perf_stream *stream,
const struct i915_oa_config *oa_config,
struct i915_active *active)
@@ -2874,7 +2854,6 @@ gen12_enable_metric_set(struct i915_perf_stream *stream,
{
struct drm_i915_private *i915 = stream->perf->i915;
struct intel_uncore *uncore = stream->uncore;
- struct i915_oa_config *oa_config = stream->oa_config;
bool periodic = stream->periodic;
u32 period_exponent = stream->period_exponent;
u32 sqcnt1;
@@ -2919,15 +2898,6 @@ gen12_enable_metric_set(struct i915_perf_stream *stream,
intel_uncore_rmw(uncore, GEN12_SQCNT1, 0, sqcnt1);
/*
- * Update all contexts prior writing the mux configurations as we need
- * to make sure all slices/subslices are ON before writing to NOA
- * registers.
- */
- ret = gen12_configure_all_contexts(stream, oa_config, active);
- if (ret)
- return ret;
-
- /*
* For Gen12, performance counters are context
* saved/restored. Only enable it for the context that
* requested this.
@@ -2980,9 +2950,6 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream)
_MASKED_BIT_DISABLE(GEN12_DISABLE_DOP_GATING));
}
- /* Reset all contexts' slices/subslices configurations. */
- gen12_configure_all_contexts(stream, NULL, NULL);
-
/* disable the context save/restore or OAR counters */
if (stream->ctx)
gen12_configure_oar_context(stream, NULL);
diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
index 5ed9c98fb599..20cb46012082 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.c
+++ b/drivers/gpu/drm/imagination/pvr_queue.c
@@ -782,7 +782,7 @@ static void pvr_queue_start(struct pvr_queue *queue)
}
}
- drm_sched_start(&queue->scheduler, true);
+ drm_sched_start(&queue->scheduler);
}
/**
@@ -842,7 +842,7 @@ pvr_queue_timedout_job(struct drm_sched_job *s_job)
}
mutex_unlock(&pvr_dev->queues.lock);
- drm_sched_start(sched, true);
+ drm_sched_start(sched);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index bbf3f8feab94..1a944edb6ddc 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -463,7 +463,7 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
lima_pm_idle(ldev);
drm_sched_resubmit_jobs(&pipe->base);
- drm_sched_start(&pipe->base, true);
+ drm_sched_start(&pipe->base);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
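
The imagination and lima hunks above reflect the same API change: drm_sched_start() no longer takes a bool requesting full recovery, so callers simply restart the scheduler once their reset work is done. A minimal sketch of a timeout handler using the updated call (the driver-specific reset step is hypothetical):

#include <drm/gpu_scheduler.h>

static enum drm_gpu_sched_stat example_timedout_job(struct drm_sched_job *bad)
{
	struct drm_gpu_scheduler *sched = bad->sched;

	drm_sched_stop(sched, bad);	/* park the scheduler around the reset */
	/* ... driver-specific hardware reset would go here ... */
	drm_sched_resubmit_jobs(sched);	/* requeue jobs that did not complete */
	drm_sched_start(sched);		/* resume submission, no bool argument */

	return DRM_GPU_SCHED_STAT_NOMINAL;
}
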
diff --git a/drivers/gpu/drm/loongson/lsdc_ttm.c b/drivers/gpu/drm/loongson/lsdc_ttm.c
index 465f622ac05d..2e42c6970c9f 100644
--- a/drivers/gpu/drm/loongson/lsdc_ttm.c
+++ b/drivers/gpu/drm/loongson/lsdc_ttm.c
@@ -341,16 +341,12 @@ void lsdc_bo_unpin(struct lsdc_bo *lbo)
void lsdc_bo_ref(struct lsdc_bo *lbo)
{
- struct ttm_buffer_object *tbo = &lbo->tbo;
-
- ttm_bo_get(tbo);
+ drm_gem_object_get(&lbo->tbo.base);
}
void lsdc_bo_unref(struct lsdc_bo *lbo)
{
- struct ttm_buffer_object *tbo = &lbo->tbo;
-
- ttm_bo_put(tbo);
+ drm_gem_object_put(&lbo->tbo.base);
}
int lsdc_bo_kmap(struct lsdc_bo *lbo)
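
The loongson change works because a TTM buffer object embeds its GEM object at tbo.base, so object lifetime can be managed through the common GEM reference helpers rather than ttm_bo_get()/ttm_bo_put(). A minimal sketch of that pattern (the wrapper type is hypothetical):

#include <drm/drm_gem.h>
#include <drm/ttm/ttm_bo.h>

struct example_bo {
	struct ttm_buffer_object tbo;	/* tbo.base is the embedded GEM object */
};

static void example_bo_ref(struct example_bo *ebo)
{
	drm_gem_object_get(&ebo->tbo.base);	/* take a GEM reference */
}

static void example_bo_unref(struct example_bo *ebo)
{
	drm_gem_object_put(&ebo->tbo.base);	/* drop the GEM reference */
}
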
diff --git a/drivers/gpu/drm/mgag200/Makefile b/drivers/gpu/drm/mgag200/Makefile
index d1b25f9f6586..5a02203fad12 100644
--- a/drivers/gpu/drm/mgag200/Makefile
+++ b/drivers/gpu/drm/mgag200/Makefile
@@ -12,6 +12,7 @@ mgag200-y := \
mgag200_g200se.o \
mgag200_g200wb.o \
mgag200_mode.o \
+ mgag200_vga_bmc.o \
mgag200_vga.o
obj-$(CONFIG_DRM_MGAG200) += mgag200.o
diff --git a/drivers/gpu/drm/mgag200/mgag200_bmc.c b/drivers/gpu/drm/mgag200/mgag200_bmc.c
index 23ef85aa7e37..a689c71ff165 100644
--- a/drivers/gpu/drm/mgag200/mgag200_bmc.c
+++ b/drivers/gpu/drm/mgag200/mgag200_bmc.c
@@ -9,12 +9,7 @@
#include "mgag200_drv.h"
-static struct mgag200_bmc_connector *to_mgag200_bmc_connector(struct drm_connector *connector)
-{
- return container_of(connector, struct mgag200_bmc_connector, base);
-}
-
-void mgag200_bmc_disable_vidrst(struct mga_device *mdev)
+void mgag200_bmc_stop_scanout(struct mga_device *mdev)
{
u8 tmp;
int iter_max;
@@ -73,15 +68,10 @@ void mgag200_bmc_disable_vidrst(struct mga_device *mdev)
}
}
-void mgag200_bmc_enable_vidrst(struct mga_device *mdev)
+void mgag200_bmc_start_scanout(struct mga_device *mdev)
{
u8 tmp;
- /* Ensure that the vrsten and hrsten are set */
- WREG8(MGAREG_CRTCEXT_INDEX, 1);
- tmp = RREG8(MGAREG_CRTCEXT_DATA);
- WREG8(MGAREG_CRTCEXT_DATA, tmp | 0x88);
-
/* Assert rstlvl2 */
WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
tmp = RREG8(DAC_DATA);
@@ -107,100 +97,3 @@ void mgag200_bmc_enable_vidrst(struct mga_device *mdev)
tmp &= ~0x10;
WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
}
-
-static const struct drm_encoder_funcs mgag200_bmc_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-static int mgag200_bmc_connector_helper_detect_ctx(struct drm_connector *connector,
- struct drm_modeset_acquire_ctx *ctx,
- bool force)
-{
- struct mgag200_bmc_connector *bmc_connector = to_mgag200_bmc_connector(connector);
- struct drm_connector *physical_connector = bmc_connector->physical_connector;
-
- /*
- * Most user-space compositors cannot handle more than one connected
- * connector per CRTC. Hence, we only mark the BMC as connected if the
- * physical connector is disconnected. If the physical connector's status
- * is connected or unknown, the BMC remains disconnected. This has no
- * effect on the output of the BMC.
- *
- * FIXME: Remove this logic once user-space compositors can handle more
- * than one connector per CRTC. The BMC should always be connected.
- */
-
- if (physical_connector && physical_connector->status == connector_status_disconnected)
- return connector_status_connected;
-
- return connector_status_disconnected;
-}
-
-static int mgag200_bmc_connector_helper_get_modes(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct mga_device *mdev = to_mga_device(dev);
- const struct mgag200_device_info *minfo = mdev->info;
-
- return drm_add_modes_noedid(connector, minfo->max_hdisplay, minfo->max_vdisplay);
-}
-
-static const struct drm_connector_helper_funcs mgag200_bmc_connector_helper_funcs = {
- .get_modes = mgag200_bmc_connector_helper_get_modes,
- .detect_ctx = mgag200_bmc_connector_helper_detect_ctx,
-};
-
-static const struct drm_connector_funcs mgag200_bmc_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int mgag200_bmc_connector_init(struct drm_device *dev,
- struct mgag200_bmc_connector *bmc_connector,
- struct drm_connector *physical_connector)
-{
- struct drm_connector *connector = &bmc_connector->base;
- int ret;
-
- ret = drm_connector_init(dev, connector, &mgag200_bmc_connector_funcs,
- DRM_MODE_CONNECTOR_VIRTUAL);
- if (ret)
- return ret;
- drm_connector_helper_add(connector, &mgag200_bmc_connector_helper_funcs);
-
- bmc_connector->physical_connector = physical_connector;
-
- return 0;
-}
-
-int mgag200_bmc_output_init(struct mga_device *mdev, struct drm_connector *physical_connector)
-{
- struct drm_device *dev = &mdev->base;
- struct drm_crtc *crtc = &mdev->crtc;
- struct drm_encoder *encoder;
- struct mgag200_bmc_connector *bmc_connector;
- struct drm_connector *connector;
- int ret;
-
- encoder = &mdev->output.bmc.encoder;
- ret = drm_encoder_init(dev, encoder, &mgag200_bmc_encoder_funcs,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
- if (ret)
- return ret;
- encoder->possible_crtcs = drm_crtc_mask(crtc);
-
- bmc_connector = &mdev->output.bmc.bmc_connector;
- ret = mgag200_bmc_connector_init(dev, bmc_connector, physical_connector);
- if (ret)
- return ret;
- connector = &bmc_connector->base;
-
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret)
- return ret;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 62080cf0f2da..6623ee4e3277 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -18,6 +18,7 @@
#include <drm/drm_managed.h>
#include <drm/drm_module.h>
#include <drm/drm_pciids.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -84,6 +85,34 @@ resource_size_t mgag200_probe_vram(void __iomem *mem, resource_size_t size)
return offset - 65536;
}
+static irqreturn_t mgag200_irq_handler(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct mga_device *mdev = to_mga_device(dev);
+ struct drm_crtc *crtc;
+ u32 status, ien;
+
+ status = RREG32(MGAREG_STATUS);
+
+ if (status & MGAREG_STATUS_VLINEPEN) {
+ ien = RREG32(MGAREG_IEN);
+ if (!(ien & MGAREG_IEN_VLINEIEN))
+ goto out;
+
+ crtc = drm_crtc_from_index(dev, 0);
+ if (WARN_ON_ONCE(!crtc))
+ goto out;
+ drm_crtc_handle_vblank(crtc);
+
+ WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR);
+
+ return IRQ_HANDLED;
+ }
+
+out:
+ return IRQ_NONE;
+}
+
/*
* DRM driver
*/
@@ -167,6 +196,7 @@ int mgag200_device_init(struct mga_device *mdev,
const struct mgag200_device_funcs *funcs)
{
struct drm_device *dev = &mdev->base;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u8 crtcext3, misc;
int ret;
@@ -192,6 +222,16 @@ int mgag200_device_init(struct mga_device *mdev,
mutex_unlock(&mdev->rmmio_lock);
+ WREG32(MGAREG_IEN, 0);
+ WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR);
+
+ ret = devm_request_irq(&pdev->dev, pdev->irq, mgag200_irq_handler, IRQF_SHARED,
+ dev->driver->name, dev);
+ if (ret) {
+ drm_err(dev, "Failed to acquire interrupt, error %d\n", ret);
+ return ret;
+ }
+
return 0;
}
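
mgag200_device_init() now masks and clears the VLINE interrupt before installing a shared PCI interrupt handler, so the handler can return IRQ_NONE for interrupts that belong to other devices on the same line. A minimal sketch of that probe-time pattern (helper and device names hypothetical):

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Quiesce the device's interrupt sources, then install a shared,
 * device-managed handler that is freed automatically on unbind. */
static int example_init_irq(struct pci_dev *pdev, irq_handler_t handler,
			    void *drvdata)
{
	/* ... mask interrupt enables and clear pending status here ... */

	return devm_request_irq(&pdev->dev, pdev->irq, handler, IRQF_SHARED,
				"example-drm", drvdata);
}
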
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 7f7dfbd0f013..4760ba92871b 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -179,6 +179,8 @@ struct mgag200_crtc_state {
const struct drm_format_info *format;
struct mgag200_pll_values pixpllc;
+
+ bool set_vidrst;
};
static inline struct mgag200_crtc_state *to_mgag200_crtc_state(struct drm_crtc_state *base)
@@ -186,11 +188,6 @@ static inline struct mgag200_crtc_state *to_mgag200_crtc_state(struct drm_crtc_s
return container_of(base, struct mgag200_crtc_state, base);
}
-struct mgag200_bmc_connector {
- struct drm_connector base;
- struct drm_connector *physical_connector;
-};
-
enum mga_type {
G200_PCI,
G200_AGP,
@@ -214,8 +211,8 @@ struct mgag200_device_info {
*/
unsigned long max_mem_bandwidth;
- /* HW has external source (e.g., BMC) to synchronize with */
- bool has_vidrst:1;
+ /* Synchronize scanout with BMC */
+ bool sync_bmc:1;
struct {
unsigned data_bit:3;
@@ -230,13 +227,13 @@ struct mgag200_device_info {
};
#define MGAG200_DEVICE_INFO_INIT(_max_hdisplay, _max_vdisplay, _max_mem_bandwidth, \
- _has_vidrst, _i2c_data_bit, _i2c_clock_bit, \
+ _sync_bmc, _i2c_data_bit, _i2c_clock_bit, \
_bug_no_startadd) \
{ \
.max_hdisplay = (_max_hdisplay), \
.max_vdisplay = (_max_vdisplay), \
.max_mem_bandwidth = (_max_mem_bandwidth), \
- .has_vidrst = (_has_vidrst), \
+ .sync_bmc = (_sync_bmc), \
.i2c = { \
.data_bit = (_i2c_data_bit), \
.clock_bit = (_i2c_clock_bit), \
@@ -246,18 +243,6 @@ struct mgag200_device_info {
struct mgag200_device_funcs {
/*
- * Disables an external reset source (i.e., BMC) before programming
- * a new display mode.
- */
- void (*disable_vidrst)(struct mga_device *mdev);
-
- /*
- * Enables an external reset source (i.e., BMC) after programming
- * a new display mode.
- */
- void (*enable_vidrst)(struct mga_device *mdev);
-
- /*
* Validate that the given state can be programmed into PIXPLLC. On
* success, the calculated parameters should be stored in the CRTC's
* state in struct @mgag200_crtc_state.pixpllc.
@@ -293,10 +278,6 @@ struct mga_device {
struct drm_encoder encoder;
struct drm_connector connector;
} vga;
- struct {
- struct drm_encoder encoder;
- struct mgag200_bmc_connector bmc_connector;
- } bmc;
} output;
};
@@ -410,17 +391,24 @@ int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_st
void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
void mgag200_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
+bool mgag200_crtc_helper_get_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
#define MGAG200_CRTC_HELPER_FUNCS \
.mode_valid = mgag200_crtc_helper_mode_valid, \
.atomic_check = mgag200_crtc_helper_atomic_check, \
.atomic_flush = mgag200_crtc_helper_atomic_flush, \
.atomic_enable = mgag200_crtc_helper_atomic_enable, \
- .atomic_disable = mgag200_crtc_helper_atomic_disable
+ .atomic_disable = mgag200_crtc_helper_atomic_disable, \
+ .get_scanout_position = mgag200_crtc_helper_get_scanout_position
void mgag200_crtc_reset(struct drm_crtc *crtc);
struct drm_crtc_state *mgag200_crtc_atomic_duplicate_state(struct drm_crtc *crtc);
void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state);
+int mgag200_crtc_enable_vblank(struct drm_crtc *crtc);
+void mgag200_crtc_disable_vblank(struct drm_crtc *crtc);
#define MGAG200_CRTC_FUNCS \
.reset = mgag200_crtc_reset, \
@@ -428,20 +416,26 @@ void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_st
.set_config = drm_atomic_helper_set_config, \
.page_flip = drm_atomic_helper_page_flip, \
.atomic_duplicate_state = mgag200_crtc_atomic_duplicate_state, \
- .atomic_destroy_state = mgag200_crtc_atomic_destroy_state
+ .atomic_destroy_state = mgag200_crtc_atomic_destroy_state, \
+ .enable_vblank = mgag200_crtc_enable_vblank, \
+ .disable_vblank = mgag200_crtc_disable_vblank, \
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp
-void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode);
+void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode,
+ bool set_vidrst);
void mgag200_set_format_regs(struct mga_device *mdev, const struct drm_format_info *format);
void mgag200_enable_display(struct mga_device *mdev);
void mgag200_init_registers(struct mga_device *mdev);
int mgag200_mode_config_init(struct mga_device *mdev, resource_size_t vram_available);
+/* mgag200_vga_bmc.c */
+int mgag200_vga_bmc_output_init(struct mga_device *mdev);
+
/* mgag200_vga.c */
int mgag200_vga_output_init(struct mga_device *mdev);
- /* mgag200_bmc.c */
-void mgag200_bmc_disable_vidrst(struct mga_device *mdev);
-void mgag200_bmc_enable_vidrst(struct mga_device *mdev);
-int mgag200_bmc_output_init(struct mga_device *mdev, struct drm_connector *physical_connector);
+/* mgag200_bmc.c */
+void mgag200_bmc_stop_scanout(struct mga_device *mdev);
+void mgag200_bmc_start_scanout(struct mga_device *mdev);
#endif /* __MGAG200_DRV_H__ */
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200.c b/drivers/gpu/drm/mgag200/mgag200_g200.c
index f874e2949840..77ce8d36cef0 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200.c
@@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -403,5 +404,9 @@ struct mga_device *mgag200_g200_device_create(struct pci_dev *pdev, const struct
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh.c b/drivers/gpu/drm/mgag200/mgag200_g200eh.c
index 52bf49ead5c5..09ced65c1d2f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200eh.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200eh.c
@@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -214,11 +215,7 @@ static int mgag200_g200eh_pipeline_init(struct mga_device *mdev)
drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
- ret = mgag200_vga_output_init(mdev);
- if (ret)
- return ret;
-
- ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector);
+ ret = mgag200_vga_bmc_output_init(mdev);
if (ret)
return ret;
@@ -279,5 +276,9 @@ struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev, const stru
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
index e7f89b2a59fd..5daa469137bd 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
@@ -7,6 +7,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -118,11 +119,7 @@ static int mgag200_g200eh3_pipeline_init(struct mga_device *mdev)
drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
- ret = mgag200_vga_output_init(mdev);
- if (ret)
- return ret;
-
- ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector);
+ ret = mgag200_vga_bmc_output_init(mdev);
if (ret)
return ret;
@@ -184,5 +181,9 @@ struct mga_device *mgag200_g200eh3_device_create(struct pci_dev *pdev,
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200er.c b/drivers/gpu/drm/mgag200/mgag200_g200er.c
index 4e8a1756138d..09cfffafe130 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200er.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200er.c
@@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -191,11 +192,8 @@ static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc,
struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
const struct drm_format_info *format = mgag200_crtc_state->format;
- if (funcs->disable_vidrst)
- funcs->disable_vidrst(mdev);
-
mgag200_set_format_regs(mdev, format);
- mgag200_set_mode_regs(mdev, adjusted_mode);
+ mgag200_set_mode_regs(mdev, adjusted_mode, mgag200_crtc_state->set_vidrst);
if (funcs->pixpllc_atomic_update)
funcs->pixpllc_atomic_update(crtc, old_state);
@@ -209,8 +207,7 @@ static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_enable_display(mdev);
- if (funcs->enable_vidrst)
- funcs->enable_vidrst(mdev);
+ drm_crtc_vblank_on(crtc);
}
static const struct drm_crtc_helper_funcs mgag200_g200er_crtc_helper_funcs = {
@@ -218,7 +215,8 @@ static const struct drm_crtc_helper_funcs mgag200_g200er_crtc_helper_funcs = {
.atomic_check = mgag200_crtc_helper_atomic_check,
.atomic_flush = mgag200_crtc_helper_atomic_flush,
.atomic_enable = mgag200_g200er_crtc_helper_atomic_enable,
- .atomic_disable = mgag200_crtc_helper_atomic_disable
+ .atomic_disable = mgag200_crtc_helper_atomic_disable,
+ .get_scanout_position = mgag200_crtc_helper_get_scanout_position,
};
static const struct drm_crtc_funcs mgag200_g200er_crtc_funcs = {
@@ -257,11 +255,7 @@ static int mgag200_g200er_pipeline_init(struct mga_device *mdev)
drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
- ret = mgag200_vga_output_init(mdev);
- if (ret)
- return ret;
-
- ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector);
+ ret = mgag200_vga_bmc_output_init(mdev);
if (ret)
return ret;
@@ -318,5 +312,9 @@ struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev, const stru
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ev.c b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
index d884f3cb0ec7..3d48baa91d8b 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200ev.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
@@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -192,11 +193,8 @@ static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc,
struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
const struct drm_format_info *format = mgag200_crtc_state->format;
- if (funcs->disable_vidrst)
- funcs->disable_vidrst(mdev);
-
mgag200_set_format_regs(mdev, format);
- mgag200_set_mode_regs(mdev, adjusted_mode);
+ mgag200_set_mode_regs(mdev, adjusted_mode, mgag200_crtc_state->set_vidrst);
if (funcs->pixpllc_atomic_update)
funcs->pixpllc_atomic_update(crtc, old_state);
@@ -210,8 +208,7 @@ static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_enable_display(mdev);
- if (funcs->enable_vidrst)
- funcs->enable_vidrst(mdev);
+ drm_crtc_vblank_on(crtc);
}
static const struct drm_crtc_helper_funcs mgag200_g200ev_crtc_helper_funcs = {
@@ -219,7 +216,8 @@ static const struct drm_crtc_helper_funcs mgag200_g200ev_crtc_helper_funcs = {
.atomic_check = mgag200_crtc_helper_atomic_check,
.atomic_flush = mgag200_crtc_helper_atomic_flush,
.atomic_enable = mgag200_g200ev_crtc_helper_atomic_enable,
- .atomic_disable = mgag200_crtc_helper_atomic_disable
+ .atomic_disable = mgag200_crtc_helper_atomic_disable,
+ .get_scanout_position = mgag200_crtc_helper_get_scanout_position,
};
static const struct drm_crtc_funcs mgag200_g200ev_crtc_funcs = {
@@ -258,11 +256,7 @@ static int mgag200_g200ev_pipeline_init(struct mga_device *mdev)
drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
- ret = mgag200_vga_output_init(mdev);
- if (ret)
- return ret;
-
- ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector);
+ ret = mgag200_vga_bmc_output_init(mdev);
if (ret)
return ret;
@@ -323,5 +317,9 @@ struct mga_device *mgag200_g200ev_device_create(struct pci_dev *pdev, const stru
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
index 839401e8b465..dabc778e64e8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
@@ -7,6 +7,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -127,11 +128,7 @@ static int mgag200_g200ew3_pipeline_init(struct mga_device *mdev)
drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
- ret = mgag200_vga_output_init(mdev);
- if (ret)
- return ret;
-
- ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector);
+ ret = mgag200_vga_bmc_output_init(mdev);
if (ret)
return ret;
@@ -146,8 +143,6 @@ static const struct mgag200_device_info mgag200_g200ew3_device_info =
MGAG200_DEVICE_INFO_INIT(2048, 2048, 0, true, 0, 1, false);
static const struct mgag200_device_funcs mgag200_g200ew3_device_funcs = {
- .disable_vidrst = mgag200_bmc_disable_vidrst,
- .enable_vidrst = mgag200_bmc_enable_vidrst,
.pixpllc_atomic_check = mgag200_g200ew3_pixpllc_atomic_check,
.pixpllc_atomic_update = mgag200_g200wb_pixpllc_atomic_update, // same as G200WB
};
@@ -204,5 +199,9 @@ struct mga_device *mgag200_g200ew3_device_create(struct pci_dev *pdev,
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c
index a824bb8ad579..9dcbe8304271 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200se.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c
@@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -323,11 +324,8 @@ static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc,
struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
const struct drm_format_info *format = mgag200_crtc_state->format;
- if (funcs->disable_vidrst)
- funcs->disable_vidrst(mdev);
-
mgag200_set_format_regs(mdev, format);
- mgag200_set_mode_regs(mdev, adjusted_mode);
+ mgag200_set_mode_regs(mdev, adjusted_mode, mgag200_crtc_state->set_vidrst);
if (funcs->pixpllc_atomic_update)
funcs->pixpllc_atomic_update(crtc, old_state);
@@ -341,8 +339,7 @@ static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_enable_display(mdev);
- if (funcs->enable_vidrst)
- funcs->enable_vidrst(mdev);
+ drm_crtc_vblank_on(crtc);
}
static const struct drm_crtc_helper_funcs mgag200_g200se_crtc_helper_funcs = {
@@ -350,7 +347,8 @@ static const struct drm_crtc_helper_funcs mgag200_g200se_crtc_helper_funcs = {
.atomic_check = mgag200_crtc_helper_atomic_check,
.atomic_flush = mgag200_crtc_helper_atomic_flush,
.atomic_enable = mgag200_g200se_crtc_helper_atomic_enable,
- .atomic_disable = mgag200_crtc_helper_atomic_disable
+ .atomic_disable = mgag200_crtc_helper_atomic_disable,
+ .get_scanout_position = mgag200_crtc_helper_get_scanout_position,
};
static const struct drm_crtc_funcs mgag200_g200se_crtc_funcs = {
@@ -389,11 +387,7 @@ static int mgag200_g200se_pipeline_init(struct mga_device *mdev)
drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
- ret = mgag200_vga_output_init(mdev);
- if (ret)
- return ret;
-
- ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector);
+ ret = mgag200_vga_bmc_output_init(mdev);
if (ret)
return ret;
@@ -523,5 +517,9 @@ struct mga_device *mgag200_g200se_device_create(struct pci_dev *pdev, const stru
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200wb.c b/drivers/gpu/drm/mgag200/mgag200_g200wb.c
index 835df0f4fc13..83a24aedbf2f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200wb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200wb.c
@@ -8,6 +8,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -261,11 +262,7 @@ static int mgag200_g200wb_pipeline_init(struct mga_device *mdev)
drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
- ret = mgag200_vga_output_init(mdev);
- if (ret)
- return ret;
-
- ret = mgag200_bmc_output_init(mdev, &mdev->output.vga.connector);
+ ret = mgag200_vga_bmc_output_init(mdev);
if (ret)
return ret;
@@ -280,8 +277,6 @@ static const struct mgag200_device_info mgag200_g200wb_device_info =
MGAG200_DEVICE_INFO_INIT(1280, 1024, 31877, true, 0, 1, false);
static const struct mgag200_device_funcs mgag200_g200wb_device_funcs = {
- .disable_vidrst = mgag200_bmc_disable_vidrst,
- .enable_vidrst = mgag200_bmc_enable_vidrst,
.pixpllc_atomic_check = mgag200_g200wb_pixpllc_atomic_check,
.pixpllc_atomic_update = mgag200_g200wb_pixpllc_atomic_update,
};
@@ -328,5 +323,9 @@ struct mga_device *mgag200_g200wb_device_create(struct pci_dev *pdev, const stru
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ERR_PTR(ret);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index d4550e4b3b01..7159909aca1e 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -22,6 +22,7 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_panic.h>
#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
#include "mgag200_ddc.h"
#include "mgag200_drv.h"
@@ -201,26 +202,39 @@ void mgag200_init_registers(struct mga_device *mdev)
WREG8(MGA_MISC_OUT, misc);
}
-void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode)
+void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode,
+ bool set_vidrst)
{
- const struct mgag200_device_info *info = mdev->info;
- unsigned int hdisplay, hsyncstart, hsyncend, htotal;
- unsigned int vdisplay, vsyncstart, vsyncend, vtotal;
+ unsigned int hdispend, hsyncstr, hsyncend, htotal, hblkstr, hblkend;
+ unsigned int vdispend, vsyncstr, vsyncend, vtotal, vblkstr, vblkend;
+ unsigned int linecomp;
u8 misc, crtcext1, crtcext2, crtcext5;
- hdisplay = mode->hdisplay / 8 - 1;
- hsyncstart = mode->hsync_start / 8 - 1;
- hsyncend = mode->hsync_end / 8 - 1;
- htotal = mode->htotal / 8 - 1;
-
+ hdispend = mode->crtc_hdisplay / 8 - 1;
+ hsyncstr = mode->crtc_hsync_start / 8 - 1;
+ hsyncend = mode->crtc_hsync_end / 8 - 1;
+ htotal = mode->crtc_htotal / 8 - 1;
/* Work around hardware quirk */
if ((htotal & 0x07) == 0x06 || (htotal & 0x07) == 0x04)
htotal++;
+ hblkstr = mode->crtc_hblank_start / 8 - 1;
+ hblkend = htotal;
+
+ vdispend = mode->crtc_vdisplay - 1;
+ vsyncstr = mode->crtc_vsync_start - 1;
+ vsyncend = mode->crtc_vsync_end - 1;
+ vtotal = mode->crtc_vtotal - 2;
+ vblkstr = mode->crtc_vblank_start;
+ vblkend = vtotal + 1;
- vdisplay = mode->vdisplay - 1;
- vsyncstart = mode->vsync_start - 1;
- vsyncend = mode->vsync_end - 1;
- vtotal = mode->vtotal - 2;
+ /*
+ * There's no VBLANK interrupt on Matrox chipsets, so we use
+ * the VLINE interrupt instead. It triggers when the current
+ * <linecomp> has been reached. For VBLANK, this is the first
+ * non-visible line at the bottom of the screen. Therefore,
+ * keep <linecomp> in sync with <vblkstr>.
+ */
+ linecomp = vblkstr;
misc = RREG8(MGA_MISC_IN);
@@ -235,45 +249,45 @@ void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mod
misc &= ~MGAREG_MISC_VSYNCPOL;
crtcext1 = (((htotal - 4) & 0x100) >> 8) |
- ((hdisplay & 0x100) >> 7) |
- ((hsyncstart & 0x100) >> 6) |
- (htotal & 0x40);
- if (info->has_vidrst)
+ ((hblkstr & 0x100) >> 7) |
+ ((hsyncstr & 0x100) >> 6) |
+ (hblkend & 0x40);
+ if (set_vidrst)
crtcext1 |= MGAREG_CRTCEXT1_VRSTEN |
MGAREG_CRTCEXT1_HRSTEN;
crtcext2 = ((vtotal & 0xc00) >> 10) |
- ((vdisplay & 0x400) >> 8) |
- ((vdisplay & 0xc00) >> 7) |
- ((vsyncstart & 0xc00) >> 5) |
- ((vdisplay & 0x400) >> 3);
+ ((vdispend & 0x400) >> 8) |
+ ((vblkstr & 0xc00) >> 7) |
+ ((vsyncstr & 0xc00) >> 5) |
+ ((linecomp & 0x400) >> 3);
crtcext5 = 0x00;
- WREG_CRT(0, htotal - 4);
- WREG_CRT(1, hdisplay);
- WREG_CRT(2, hdisplay);
- WREG_CRT(3, (htotal & 0x1F) | 0x80);
- WREG_CRT(4, hsyncstart);
- WREG_CRT(5, ((htotal & 0x20) << 2) | (hsyncend & 0x1F));
- WREG_CRT(6, vtotal & 0xFF);
- WREG_CRT(7, ((vtotal & 0x100) >> 8) |
- ((vdisplay & 0x100) >> 7) |
- ((vsyncstart & 0x100) >> 6) |
- ((vdisplay & 0x100) >> 5) |
- ((vdisplay & 0x100) >> 4) | /* linecomp */
- ((vtotal & 0x200) >> 4) |
- ((vdisplay & 0x200) >> 3) |
- ((vsyncstart & 0x200) >> 2));
- WREG_CRT(9, ((vdisplay & 0x200) >> 4) |
- ((vdisplay & 0x200) >> 3));
- WREG_CRT(16, vsyncstart & 0xFF);
- WREG_CRT(17, (vsyncend & 0x0F) | 0x20);
- WREG_CRT(18, vdisplay & 0xFF);
- WREG_CRT(20, 0);
- WREG_CRT(21, vdisplay & 0xFF);
- WREG_CRT(22, (vtotal + 1) & 0xFF);
- WREG_CRT(23, 0xc3);
- WREG_CRT(24, vdisplay & 0xFF);
+ WREG_CRT(0x00, htotal - 4);
+ WREG_CRT(0x01, hdispend);
+ WREG_CRT(0x02, hblkstr);
+ WREG_CRT(0x03, (hblkend & 0x1f) | 0x80);
+ WREG_CRT(0x04, hsyncstr);
+ WREG_CRT(0x05, ((hblkend & 0x20) << 2) | (hsyncend & 0x1f));
+ WREG_CRT(0x06, vtotal & 0xff);
+ WREG_CRT(0x07, ((vtotal & 0x100) >> 8) |
+ ((vdispend & 0x100) >> 7) |
+ ((vsyncstr & 0x100) >> 6) |
+ ((vblkstr & 0x100) >> 5) |
+ ((linecomp & 0x100) >> 4) |
+ ((vtotal & 0x200) >> 4) |
+ ((vdispend & 0x200) >> 3) |
+ ((vsyncstr & 0x200) >> 2));
+ WREG_CRT(0x09, ((vblkstr & 0x200) >> 4) |
+ ((linecomp & 0x200) >> 3));
+ WREG_CRT(0x10, vsyncstr & 0xff);
+ WREG_CRT(0x11, (vsyncend & 0x0f) | 0x20);
+ WREG_CRT(0x12, vdispend & 0xff);
+ WREG_CRT(0x14, 0);
+ WREG_CRT(0x15, vblkstr & 0xff);
+ WREG_CRT(0x16, vblkend & 0xff);
+ WREG_CRT(0x17, 0xc3);
+ WREG_CRT(0x18, linecomp & 0xff);
WREG_ECRT(0x01, crtcext1);
WREG_ECRT(0x02, crtcext2);
@@ -631,6 +645,8 @@ void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_s
struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
struct drm_device *dev = crtc->dev;
struct mga_device *mdev = to_mga_device(dev);
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
if (crtc_state->enable && crtc_state->color_mgmt_changed) {
const struct drm_format_info *format = mgag200_crtc_state->format;
@@ -640,6 +656,18 @@ void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_s
else
mgag200_crtc_set_gamma_linear(mdev, format);
}
+
+ event = crtc->state->event;
+ if (event) {
+ crtc->state->event = NULL;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (drm_crtc_vblank_get(crtc) != 0)
+ drm_crtc_send_vblank_event(crtc, event);
+ else
+ drm_crtc_arm_vblank_event(crtc, event);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
}
void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *old_state)
@@ -652,11 +680,8 @@ void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_
struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
const struct drm_format_info *format = mgag200_crtc_state->format;
- if (funcs->disable_vidrst)
- funcs->disable_vidrst(mdev);
-
mgag200_set_format_regs(mdev, format);
- mgag200_set_mode_regs(mdev, adjusted_mode);
+ mgag200_set_mode_regs(mdev, adjusted_mode, mgag200_crtc_state->set_vidrst);
if (funcs->pixpllc_atomic_update)
funcs->pixpllc_atomic_update(crtc, old_state);
@@ -668,22 +693,41 @@ void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_
mgag200_enable_display(mdev);
- if (funcs->enable_vidrst)
- funcs->enable_vidrst(mdev);
+ drm_crtc_vblank_on(crtc);
}
void mgag200_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *old_state)
{
struct mga_device *mdev = to_mga_device(crtc->dev);
- const struct mgag200_device_funcs *funcs = mdev->funcs;
- if (funcs->disable_vidrst)
- funcs->disable_vidrst(mdev);
+ drm_crtc_vblank_off(crtc);
mgag200_disable_display(mdev);
+}
+
+bool mgag200_crtc_helper_get_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct mga_device *mdev = to_mga_device(crtc->dev);
+ u32 vcount;
- if (funcs->enable_vidrst)
- funcs->enable_vidrst(mdev);
+ if (stime)
+ *stime = ktime_get();
+
+ if (vpos) {
+ vcount = RREG32(MGAREG_VCOUNT);
+ *vpos = vcount & GENMASK(11, 0);
+ }
+
+ if (hpos)
+ *hpos = mode->htotal >> 1; // near middle of scanline on average
+
+ if (etime)
+ *etime = ktime_get();
+
+ return true;
}
void mgag200_crtc_reset(struct drm_crtc *crtc)
@@ -717,6 +761,7 @@ struct drm_crtc_state *mgag200_crtc_atomic_duplicate_state(struct drm_crtc *crtc
new_mgag200_crtc_state->format = mgag200_crtc_state->format;
memcpy(&new_mgag200_crtc_state->pixpllc, &mgag200_crtc_state->pixpllc,
sizeof(new_mgag200_crtc_state->pixpllc));
+ new_mgag200_crtc_state->set_vidrst = mgag200_crtc_state->set_vidrst;
return &new_mgag200_crtc_state->base;
}
@@ -729,6 +774,30 @@ void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_st
kfree(mgag200_crtc_state);
}
+int mgag200_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct mga_device *mdev = to_mga_device(crtc->dev);
+ u32 ien;
+
+ WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR);
+
+ ien = RREG32(MGAREG_IEN);
+ ien |= MGAREG_IEN_VLINEIEN;
+ WREG32(MGAREG_IEN, ien);
+
+ return 0;
+}
+
+void mgag200_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct mga_device *mdev = to_mga_device(crtc->dev);
+ u32 ien;
+
+ ien = RREG32(MGAREG_IEN);
+ ien &= ~(MGAREG_IEN_VLINEIEN);
+ WREG32(MGAREG_IEN, ien);
+}
+
/*
* Mode config
*/
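
Taken together, the mgag200_mode.c hunks emulate vblank with the VLINE interrupt: the mode-setting code keeps <linecomp> on the first blanked line, enable_vblank()/disable_vblank() toggle the interrupt enable, atomic_flush() arms or sends the pending page-flip event, and the IRQ handler forwards each event to the DRM vblank core. A condensed sketch of that flow, with hypothetical register helpers standing in for the MGAREG_* accesses:

#include <linux/interrupt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>

/* Hypothetical register helpers standing in for the MGAREG_* accesses. */
void example_clear_vline(struct drm_device *dev);
void example_unmask_vline(struct drm_device *dev);
bool example_vline_pending(struct drm_device *dev);

static int example_crtc_enable_vblank(struct drm_crtc *crtc)
{
	example_clear_vline(crtc->dev);		/* drop any stale event */
	example_unmask_vline(crtc->dev);	/* enable the VLINE interrupt */
	return 0;
}

static irqreturn_t example_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_crtc *crtc = drm_crtc_from_index(dev, 0);

	if (!example_vline_pending(dev))
		return IRQ_NONE;		/* shared line, not our interrupt */

	drm_crtc_handle_vblank(crtc);		/* feeds timestamps and pending events */
	example_clear_vline(dev);
	return IRQ_HANDLED;
}
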
diff --git a/drivers/gpu/drm/mgag200/mgag200_reg.h b/drivers/gpu/drm/mgag200/mgag200_reg.h
index aa73463674e4..d4fef8f25871 100644
--- a/drivers/gpu/drm/mgag200/mgag200_reg.h
+++ b/drivers/gpu/drm/mgag200/mgag200_reg.h
@@ -102,10 +102,17 @@
#define MGAREG_EXEC 0x0100
#define MGAREG_FIFOSTATUS 0x1e10
+
#define MGAREG_STATUS 0x1e14
+#define MGAREG_STATUS_VLINEPEN BIT(5)
+
#define MGAREG_CACHEFLUSH 0x1fff
+
#define MGAREG_ICLEAR 0x1e18
+#define MGAREG_ICLEAR_VLINEICLR BIT(5)
+
#define MGAREG_IEN 0x1e1c
+#define MGAREG_IEN_VLINEIEN BIT(5)
#define MGAREG_VCOUNT 0x1e20
diff --git a/drivers/gpu/drm/mgag200/mgag200_vga_bmc.c b/drivers/gpu/drm/mgag200/mgag200_vga_bmc.c
new file mode 100644
index 000000000000..a5a3ac108bd5
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_vga_bmc.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+
+#include "mgag200_ddc.h"
+#include "mgag200_drv.h"
+
+static void mgag200_vga_bmc_encoder_atomic_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct mga_device *mdev = to_mga_device(encoder->dev);
+
+ if (mdev->info->sync_bmc)
+ mgag200_bmc_stop_scanout(mdev);
+}
+
+static void mgag200_vga_bmc_encoder_atomic_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct mga_device *mdev = to_mga_device(encoder->dev);
+
+ if (mdev->info->sync_bmc)
+ mgag200_bmc_start_scanout(mdev);
+}
+
+static int mgag200_vga_bmc_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *new_crtc_state,
+ struct drm_connector_state *new_connector_state)
+{
+ struct mga_device *mdev = to_mga_device(encoder->dev);
+ struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
+
+ new_mgag200_crtc_state->set_vidrst = mdev->info->sync_bmc;
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs mgag200_dac_encoder_helper_funcs = {
+ .atomic_disable = mgag200_vga_bmc_encoder_atomic_disable,
+ .atomic_enable = mgag200_vga_bmc_encoder_atomic_enable,
+ .atomic_check = mgag200_vga_bmc_encoder_atomic_check,
+};
+
+static const struct drm_encoder_funcs mgag200_dac_encoder_funcs = {
+ .destroy = drm_encoder_cleanup
+};
+
+static int mgag200_vga_bmc_connector_helper_get_modes(struct drm_connector *connector)
+{
+ struct mga_device *mdev = to_mga_device(connector->dev);
+ const struct mgag200_device_info *minfo = mdev->info;
+ int count;
+
+ count = drm_connector_helper_get_modes(connector);
+
+ if (!count) {
+ /*
+ * There's no EDID data without a connected monitor. Set BMC-
+ * compatible modes in this case. The XGA default resolution
+ * should work well for all BMCs.
+ */
+ count = drm_add_modes_noedid(connector, minfo->max_hdisplay, minfo->max_vdisplay);
+ if (count)
+ drm_set_preferred_mode(connector, 1024, 768);
+ }
+
+ return count;
+}
+
+/*
+ * There's no monitor connected if the DDC did not return an EDID. Still
+ * return 'connected' as there's always a BMC. Incrementing the connector's
+ * epoch counter triggers an update of the related properties.
+ */
+static int mgag200_vga_bmc_connector_helper_detect_ctx(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ enum drm_connector_status old_status, status;
+
+ if (connector->edid_blob_ptr)
+ old_status = connector_status_connected;
+ else
+ old_status = connector_status_disconnected;
+
+ status = drm_connector_helper_detect_from_ddc(connector, ctx, force);
+
+ if (status != old_status)
+ ++connector->epoch_counter;
+ return connector_status_connected;
+}
+
+static const struct drm_connector_helper_funcs mgag200_vga_connector_helper_funcs = {
+ .get_modes = mgag200_vga_bmc_connector_helper_get_modes,
+ .detect_ctx = mgag200_vga_bmc_connector_helper_detect_ctx,
+};
+
+static const struct drm_connector_funcs mgag200_vga_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state
+};
+
+int mgag200_vga_bmc_output_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = &mdev->base;
+ struct drm_crtc *crtc = &mdev->crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct i2c_adapter *ddc;
+ int ret;
+
+ encoder = &mdev->output.vga.encoder;
+ ret = drm_encoder_init(dev, encoder, &mgag200_dac_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_encoder_helper_add(encoder, &mgag200_dac_encoder_helper_funcs);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ ddc = mgag200_ddc_create(mdev);
+ if (IS_ERR(ddc)) {
+ ret = PTR_ERR(ddc);
+ drm_err(dev, "failed to add DDC bus: %d\n", ret);
+ return ret;
+ }
+
+ connector = &mdev->output.vga.connector;
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &mgag200_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA, ddc);
+ if (ret) {
+ drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
+ return ret;
+ }
+ drm_connector_helper_add(connector, &mgag200_vga_connector_helper_funcs);
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
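
In the new mgag200_vga_bmc.c, get_modes() prefers EDID modes from the DDC channel and only falls back to standard modes, capped at the chip's maximum resolution, when no monitor (only the BMC) is attached; detect_ctx() always reports the connector as connected but bumps the epoch counter when the EDID appears or disappears so userspace rereads the properties. A minimal sketch of the EDID-or-fallback half of that logic (the 2048x2048 cap is an assumed placeholder for the per-chip limits):

#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>

static int example_connector_get_modes(struct drm_connector *connector)
{
	/* Modes from EDID, if the DDC transfer produced one. */
	int count = drm_connector_helper_get_modes(connector);

	if (!count) {
		/* Headless: expose standard modes up to the assumed limit
		 * and mark XGA as preferred for the BMC. */
		count = drm_add_modes_noedid(connector, 2048, 2048);
		if (count)
			drm_set_preferred_mode(connector, 1024, 768);
	}
	return count;
}
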
diff --git a/drivers/gpu/drm/mxsfb/lcdif_kms.c b/drivers/gpu/drm/mxsfb/lcdif_kms.c
index 2541d2de4e45..dbd42cc1da87 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_kms.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_kms.c
@@ -407,8 +407,7 @@ static void lcdif_crtc_mode_set_nofb(struct drm_crtc_state *crtc_state,
struct drm_display_mode *m = &crtc_state->adjusted_mode;
DRM_DEV_DEBUG_DRIVER(drm->dev, "Pixel clock: %dkHz (actual: %dkHz)\n",
- m->crtc_clock,
- (int)(clk_get_rate(lcdif->clk) / 1000));
+ m->clock, (int)(clk_get_rate(lcdif->clk) / 1000));
DRM_DEV_DEBUG_DRIVER(drm->dev, "Bridge bus_flags: 0x%08X\n",
lcdif_crtc_state->bus_flags);
DRM_DEV_DEBUG_DRIVER(drm->dev, "Mode flags: 0x%08X\n", m->flags);
@@ -538,7 +537,7 @@ static void lcdif_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_device *drm = lcdif->drm;
dma_addr_t paddr;
- clk_set_rate(lcdif->clk, m->crtc_clock * 1000);
+ clk_set_rate(lcdif->clk, m->clock * 1000);
pm_runtime_get_sync(drm->dev);
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index c32c01827c1d..7b863355c5c6 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -25,7 +25,6 @@ nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
nouveau-$(CONFIG_LEDS_CLASS) += nouveau_led.o
nouveau-y += nouveau_nvif.o
nouveau-$(CONFIG_NOUVEAU_PLATFORM_DRIVER) += nouveau_platform.o
-nouveau-y += nouveau_usif.o # userspace <-> nvif
nouveau-y += nouveau_vga.o
# DRM - memory management
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 4310ad71870b..67146f1e8482 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -118,8 +118,8 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
{
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
- struct nvkm_clk *clk = nvxx_clk(&drm->client.device);
+ struct nvkm_bios *bios = nvxx_bios(drm);
+ struct nvkm_clk *clk = nvxx_clk(drm);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index];
@@ -617,9 +617,15 @@ nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (ret == 0) {
- if (disp->image[nv_crtc->index])
- nouveau_bo_unpin(disp->image[nv_crtc->index]);
- nouveau_bo_ref(nvbo, &disp->image[nv_crtc->index]);
+ if (disp->image[nv_crtc->index]) {
+ struct nouveau_bo *bo = disp->image[nv_crtc->index];
+
+ nouveau_bo_unpin(bo);
+ drm_gem_object_put(&bo->bo.base);
+ }
+
+ drm_gem_object_get(&nvbo->bo.base);
+ disp->image[nv_crtc->index] = nvbo;
}
return ret;
@@ -754,13 +760,17 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
drm_crtc_cleanup(crtc);
- if (disp->image[nv_crtc->index])
- nouveau_bo_unpin(disp->image[nv_crtc->index]);
- nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
+ if (disp->image[nv_crtc->index]) {
+ struct nouveau_bo *bo = disp->image[nv_crtc->index];
+
+ nouveau_bo_unpin(bo);
+ drm_gem_object_put(&bo->bo.base);
+ disp->image[nv_crtc->index] = NULL;
+ }
nouveau_bo_unmap(nv_crtc->cursor.nvbo);
nouveau_bo_unpin(nv_crtc->cursor.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ nouveau_bo_fini(nv_crtc->cursor.nvbo);
nvif_event_dtor(&nv_crtc->vblank);
nvif_head_dtor(&nv_crtc->head);
kfree(nv_crtc);
@@ -794,9 +804,14 @@ nv_crtc_disable(struct drm_crtc *crtc)
{
struct nv04_display *disp = nv04_display(crtc->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- if (disp->image[nv_crtc->index])
- nouveau_bo_unpin(disp->image[nv_crtc->index]);
- nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
+
+ if (disp->image[nv_crtc->index]) {
+ struct nouveau_bo *bo = disp->image[nv_crtc->index];
+
+ nouveau_bo_unpin(bo);
+ drm_gem_object_put(&bo->bo.base);
+ disp->image[nv_crtc->index] = NULL;
+ }
}
static int
@@ -1042,7 +1057,7 @@ nv04_finish_page_flip(struct nouveau_channel *chan,
struct nv04_page_flip_state *ps)
{
struct nouveau_fence_chan *fctx = chan->fence;
- struct nouveau_drm *drm = chan->drm;
+ struct nouveau_drm *drm = chan->cli->drm;
struct drm_device *dev = drm->dev;
struct nv04_page_flip_state *s;
unsigned long flags;
@@ -1098,9 +1113,9 @@ nv04_page_flip_emit(struct nouveau_channel *chan,
struct nouveau_fence **pfence)
{
struct nouveau_fence_chan *fctx = chan->fence;
- struct nouveau_drm *drm = chan->drm;
+ struct nouveau_drm *drm = chan->cli->drm;
struct drm_device *dev = drm->dev;
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
unsigned long flags;
int ret;
@@ -1157,8 +1172,8 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
chan = drm->channel;
if (!chan)
return -ENODEV;
- cli = (void *)chan->user.client;
- push = chan->chan.push;
+ cli = chan->cli;
+ push = &chan->chan.push;
s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
@@ -1210,7 +1225,11 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
PUSH_NVSQ(push, NV05F, 0x0130, 0);
}
- nouveau_bo_ref(new_bo, &dispnv04->image[head]);
+ if (dispnv04->image[head])
+ drm_gem_object_put(&dispnv04->image[head]->bo.base);
+
+ drm_gem_object_get(&new_bo->bo.base);
+ dispnv04->image[head] = new_bo;
ret = nv04_page_flip_emit(chan, old_bo, new_bo, s, &fence);
if (ret)
@@ -1329,7 +1348,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
nouveau_bo_unpin(nv_crtc->cursor.nvbo);
}
if (ret)
- nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ nouveau_bo_fini(nv_crtc->cursor.nvbo);
}
nv04_cursor_init(nv_crtc);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index d6b8e0cce2ac..2e12bf136607 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -237,7 +237,7 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
- struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
+ struct nvkm_gpio *gpio = nvxx_gpio(drm);
struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index d5b129dc623b..504c421aa176 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -626,7 +626,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
+ struct nvkm_i2c *i2c = nvxx_i2c(drm);
struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
struct nvkm_i2c_bus_probe info[] = {
{
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 4b7497a8755c..f71199a39bc4 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -189,7 +189,6 @@ static void
nv04_display_destroy(struct drm_device *dev)
{
struct nv04_display *disp = nv04_display(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_encoder *encoder;
struct nouveau_crtc *nv_crtc;
@@ -206,15 +205,13 @@ nv04_display_destroy(struct drm_device *dev)
nouveau_display(dev)->priv = NULL;
vfree(disp);
-
- nvif_object_unmap(&drm->client.device.object);
}
int
nv04_display_create(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
+ struct nvkm_i2c *i2c = nvxx_i2c(drm);
struct dcb_table *dcb = &drm->vbios.dcb;
struct drm_connector *connector, *ct;
struct drm_encoder *encoder;
@@ -229,8 +226,6 @@ nv04_display_create(struct drm_device *dev)
disp->drm = drm;
- nvif_object_map(&drm->client.device.object, NULL, 0);
-
nouveau_display(dev)->priv = disp;
nouveau_display(dev)->dtor = nv04_display_destroy;
nouveau_display(dev)->init = nv04_display_init;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 11a6663758ec..85ec0f534392 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -176,7 +176,7 @@ static inline void
nouveau_bios_run_init_table(struct drm_device *dev, u16 table,
struct dcb_output *outp, int crtc)
{
- nvbios_init(&nvxx_bios(&nouveau_drm(dev)->client.device)->subdev, table,
+ nvbios_init(&nvxx_bios(nouveau_drm(dev))->subdev, table,
init.outp = outp;
init.head = crtc;
);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index f7d35657aa64..8b376f9c8746 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -166,7 +166,7 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_object *device = &drm->client.device.object;
- struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
+ struct nvkm_bios *bios = nvxx_bios(drm);
uint32_t reg1, pll1, pll2 = 0;
struct nvbios_pll pll_lim;
int ret;
@@ -258,9 +258,8 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
*/
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvif_device *device = &drm->client.device;
- struct nvkm_clk *clk = nvxx_clk(device);
- struct nvkm_bios *bios = nvxx_bios(device);
+ struct nvkm_clk *clk = nvxx_clk(drm);
+ struct nvkm_bios *bios = nvxx_bios(drm);
struct nvbios_pll pll_lim;
struct nvkm_pll_vals pv;
enum nvbios_pll_type pll = head ? PLL_VPLL1 : PLL_VPLL0;
@@ -470,7 +469,7 @@ nv_load_state_ramdac(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_clk *clk = nvxx_clk(&drm->client.device);
+ struct nvkm_clk *clk = nvxx_clk(drm);
struct nv04_crtc_reg *regp = &state->crtc_reg[head];
uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
int i;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index de3ea731d6e6..d3014027a812 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -53,7 +53,7 @@ static struct nvkm_i2c_bus_probe nv04_tv_encoder_info[] = {
int nv04_tv_identify(struct drm_device *dev, int i2c_index)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
+ struct nvkm_i2c *i2c = nvxx_i2c(drm);
struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, i2c_index);
if (bus) {
return nvkm_i2c_bus_probe(bus, "TV encoder",
@@ -205,7 +205,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
struct drm_encoder *encoder;
struct drm_device *dev = connector->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
+ struct nvkm_i2c *i2c = nvxx_i2c(drm);
struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, entry->i2c_index);
int type, ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 2033214c4b78..3ecb101d23e9 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -47,7 +47,7 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
+ struct nvkm_gpio *gpio = nvxx_gpio(drm);
uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
@@ -131,7 +131,7 @@ static bool
get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_device *device = nvxx_device(&drm->client.device);
+ struct nvkm_device *device = nvxx_device(drm);
if (device->quirk && device->quirk->tv_pin_mask) {
*pin_mask = device->quirk->tv_pin_mask;
@@ -369,7 +369,7 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
+ struct nvkm_gpio *gpio = nvxx_gpio(drm);
struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index 70c62b861276..a431f6c5f6fa 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -35,7 +35,7 @@
int
base507c_update(struct nv50_wndw *wndw, u32 *interlock)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -48,7 +48,7 @@ base507c_update(struct nv50_wndw *wndw, u32 *interlock)
int
base507c_image_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
@@ -65,7 +65,7 @@ base507c_image_clr(struct nv50_wndw *wndw)
static int
base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 13)))
@@ -118,7 +118,7 @@ base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
int
base507c_xlut_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -132,7 +132,7 @@ base507c_xlut_clr(struct nv50_wndw *wndw)
int
base507c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -158,7 +158,7 @@ base507c_ntfy_wait_begun(struct nouveau_bo *bo, u32 offset,
int
base507c_ntfy_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -171,7 +171,7 @@ base507c_ntfy_clr(struct nv50_wndw *wndw)
int
base507c_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 3)))
@@ -195,7 +195,7 @@ base507c_ntfy_reset(struct nouveau_bo *bo, u32 offset)
int
base507c_sema_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -208,7 +208,7 @@ base507c_sema_clr(struct nv50_wndw *wndw)
int
base507c_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 5)))
@@ -307,7 +307,6 @@ base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
struct nvif_disp_chan_v0 args = {
.id = head,
};
- struct nouveau_display *disp = nouveau_display(drm->dev);
struct nv50_disp *disp50 = nv50_disp(drm->dev);
struct nv50_wndw *wndw;
int ret;
@@ -318,7 +317,7 @@ base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
if (*pwndw = wndw, ret)
return ret;
- ret = nv50_dmac_create(&drm->client.device, &disp->disp.object,
+ ret = nv50_dmac_create(drm,
&oclass, head, &args, sizeof(args),
disp50->sync->offset, &wndw->wndw);
if (ret) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base827c.c b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
index 093d4ba6910e..4545cc5f3a14 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base827c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
@@ -28,7 +28,7 @@
static int
base827c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 13)))
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base907c.c b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
index e6b0417c325b..4a2d5a259e15 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base907c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
@@ -28,7 +28,7 @@
static int
base907c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 10)))
@@ -65,7 +65,7 @@ base907c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
static int
base907c_xlut_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 6)))
@@ -84,7 +84,7 @@ base907c_xlut_clr(struct nv50_wndw *wndw)
static int
base907c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 6)))
@@ -156,7 +156,7 @@ base907c_csc(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
static int
base907c_csc_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -170,7 +170,7 @@ base907c_csc_clr(struct nv50_wndw *wndw)
static int
base907c_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 13)))
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
index e5bb5ca950c8..ce2cb78bbdd3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -33,7 +33,7 @@
int
core507d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, (ntfy ? 2 : 0) + 3)))
@@ -80,7 +80,7 @@ core507d_ntfy_init(struct nouveau_bo *bo, u32 offset)
int
core507d_read_caps(struct nv50_disp *disp)
{
- struct nvif_push *push = disp->core->chan.push;
+ struct nvif_push *push = &disp->core->chan.push;
int ret;
ret = PUSH_WAIT(push, 6);
@@ -130,7 +130,7 @@ core507d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
int
core507d_init(struct nv50_core *core)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -166,7 +166,7 @@ core507d_new_(const struct nv50_core_func *func, struct nouveau_drm *drm,
return -ENOMEM;
core->func = func;
- ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+ ret = nv50_dmac_create(drm,
&oclass, 0, &args, sizeof(args),
disp->sync->offset, &core->chan);
if (ret) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index 42f877f2ced2..7f637b8830be 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -33,7 +33,7 @@
int
corec37d_wndw_owner(struct nv50_core *core)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
const u32 windows = 8; /*XXX*/
int ret, i;
@@ -51,7 +51,7 @@ corec37d_wndw_owner(struct nv50_core *core)
int
corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, (ntfy ? 2 * 2 : 0) + 5)))
@@ -127,7 +127,7 @@ int corec37d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
static int
corec37d_init(struct nv50_core *core)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
const u32 windows = 8; /*XXX*/
int ret, i;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
index 53b1e2a569c1..421d0d57e1d8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
@@ -29,7 +29,7 @@
static int
corec57d_init(struct nv50_core *core)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
const u32 windows = 8; /*XXX*/
int ret, i;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc907d.c b/drivers/gpu/drm/nouveau/dispnv50/crc907d.c
index f9ad641555b7..a674ba435b05 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crc907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc907d.c
@@ -26,7 +26,7 @@ static int
crc907d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source,
struct nv50_crc_notifier_ctx *ctx)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 crc_args = NVDEF(NV907D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) |
NVDEF(NV907D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
@@ -74,7 +74,7 @@ crc907d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source
static int
crc907d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c
index f10f6c484408..4821ce32f9ed 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c
@@ -15,7 +15,7 @@ static int
crcc37d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source,
struct nv50_crc_notifier_ctx *ctx)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 crc_args = NVVAL(NVC37D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, i * 4) |
NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
@@ -53,7 +53,7 @@ crcc37d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source
int crcc37d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcc57d.c b/drivers/gpu/drm/nouveau/dispnv50/crcc57d.c
index cc0130e3d496..ad591dcb0bc9 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crcc57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crcc57d.c
@@ -13,7 +13,7 @@
static int crcc57d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source,
struct nv50_crc_notifier_ctx *ctx)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 crc_args = NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) |
NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) |
diff --git a/drivers/gpu/drm/nouveau/dispnv50/dac507d.c b/drivers/gpu/drm/nouveau/dispnv50/dac507d.c
index 09de78d96679..99ae692f219e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/dac507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/dac507d.c
@@ -29,7 +29,7 @@ static int
dac507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
u32 sync = 0;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/dac907d.c b/drivers/gpu/drm/nouveau/dispnv50/dac907d.c
index 95efa625b691..74bc9f81e3f1 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/dac907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/dac907d.c
@@ -29,7 +29,7 @@ static int
dac907d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 0efd6b4906cf..e4c8ce6dd40a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -93,8 +93,11 @@ nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
ret = nvif_object_ctor(disp, "kmsChan", 0,
oclass[0], data, size,
&chan->user);
- if (ret == 0)
- nvif_object_map(&chan->user, NULL, 0);
+ if (ret == 0) {
+ ret = nvif_object_map(&chan->user, NULL, 0);
+ if (ret)
+ nvif_object_dtor(&chan->user);
+ }
nvif_object_sclass_put(&sclass);
return ret;
}
@@ -124,20 +127,20 @@ nv50_dmac_destroy(struct nv50_dmac *dmac)
nv50_chan_destroy(&dmac->base);
- nvif_mem_dtor(&dmac->_push.mem);
+ nvif_mem_dtor(&dmac->push.mem);
}
static void
nv50_dmac_kick(struct nvif_push *push)
{
- struct nv50_dmac *dmac = container_of(push, typeof(*dmac), _push);
+ struct nv50_dmac *dmac = container_of(push, typeof(*dmac), push);
- dmac->cur = push->cur - (u32 __iomem *)dmac->_push.mem.object.map.ptr;
+ dmac->cur = push->cur - (u32 __iomem *)dmac->push.mem.object.map.ptr;
if (dmac->put != dmac->cur) {
/* Push buffer fetches are not coherent with BAR1, we need to ensure
* writes have been flushed right through to VRAM before writing PUT.
*/
- if (dmac->push->mem.type & NVIF_MEM_VRAM) {
+ if (dmac->push.mem.type & NVIF_MEM_VRAM) {
struct nvif_device *device = dmac->base.device;
nvif_wr32(&device->object, 0x070000, 0x00000001);
nvif_msec(device, 2000,
@@ -172,7 +175,7 @@ nv50_dmac_wind(struct nv50_dmac *dmac)
if (get == 0) {
/* Corner-case, HW idle, but non-committed work pending. */
if (dmac->put == 0)
- nv50_dmac_kick(dmac->push);
+ nv50_dmac_kick(&dmac->push);
if (nvif_msec(dmac->base.device, 2000,
if (NVIF_TV32(&dmac->base.user, NV507C, GET, PTR, >, 0))
@@ -181,7 +184,7 @@ nv50_dmac_wind(struct nv50_dmac *dmac)
return -ETIMEDOUT;
}
- PUSH_RSVD(dmac->push, PUSH_JUMP(dmac->push, 0));
+ PUSH_RSVD(&dmac->push, PUSH_JUMP(&dmac->push, 0));
dmac->cur = 0;
return 0;
}
@@ -189,19 +192,19 @@ nv50_dmac_wind(struct nv50_dmac *dmac)
static int
nv50_dmac_wait(struct nvif_push *push, u32 size)
{
- struct nv50_dmac *dmac = container_of(push, typeof(*dmac), _push);
+ struct nv50_dmac *dmac = container_of(push, typeof(*dmac), push);
int free;
if (WARN_ON(size > dmac->max))
return -EINVAL;
- dmac->cur = push->cur - (u32 __iomem *)dmac->_push.mem.object.map.ptr;
+ dmac->cur = push->cur - (u32 __iomem *)dmac->push.mem.object.map.ptr;
if (dmac->cur + size >= dmac->max) {
int ret = nv50_dmac_wind(dmac);
if (ret)
return ret;
- push->cur = dmac->_push.mem.object.map.ptr;
+ push->cur = dmac->push.mem.object.map.ptr;
push->cur = push->cur + dmac->cur;
nv50_dmac_kick(push);
}
@@ -214,7 +217,7 @@ nv50_dmac_wait(struct nvif_push *push, u32 size)
return -ETIMEDOUT;
}
- push->bgn = dmac->_push.mem.object.map.ptr;
+ push->bgn = dmac->push.mem.object.map.ptr;
push->bgn = push->bgn + dmac->cur;
push->cur = push->bgn;
push->end = push->cur + free;
@@ -226,17 +229,16 @@ static int nv50_dmac_vram_pushbuf = -1;
module_param_named(kms_vram_pushbuf, nv50_dmac_vram_pushbuf, int, 0400);
int
-nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
+nv50_dmac_create(struct nouveau_drm *drm,
const s32 *oclass, u8 head, void *data, u32 size, s64 syncbuf,
struct nv50_dmac *dmac)
{
- struct nouveau_cli *cli = (void *)device->object.client;
+ struct nvif_device *device = &drm->device;
+ struct nvif_object *disp = &drm->display->disp.object;
struct nvif_disp_chan_v0 *args = data;
u8 type = NVIF_MEM_COHERENT;
int ret;
- mutex_init(&dmac->lock);
-
/* Pascal added support for 47-bit physical addresses, but some
* parts of EVO still only accept 40-bit PAs.
*
@@ -250,18 +252,15 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
(nv50_dmac_vram_pushbuf < 0 && device->info.family == NV_DEVICE_INFO_V0_PASCAL))
type |= NVIF_MEM_VRAM;
- ret = nvif_mem_ctor_map(&cli->mmu, "kmsChanPush", type, 0x1000,
- &dmac->_push.mem);
+ ret = nvif_mem_ctor_map(&drm->mmu, "kmsChanPush", type, 0x1000, &dmac->push.mem);
if (ret)
return ret;
- dmac->ptr = dmac->_push.mem.object.map.ptr;
- dmac->_push.wait = nv50_dmac_wait;
- dmac->_push.kick = nv50_dmac_kick;
- dmac->push = &dmac->_push;
- dmac->push->bgn = dmac->_push.mem.object.map.ptr;
- dmac->push->cur = dmac->push->bgn;
- dmac->push->end = dmac->push->bgn;
+ dmac->push.wait = nv50_dmac_wait;
+ dmac->push.kick = nv50_dmac_kick;
+ dmac->push.bgn = dmac->push.mem.object.map.ptr;
+ dmac->push.cur = dmac->push.bgn;
+ dmac->push.end = dmac->push.bgn;
dmac->max = 0x1000/4 - 1;
/* EVO channels are affected by a HW bug where the last 12 DWORDs
@@ -270,7 +269,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
if (disp->oclass < GV100_DISP)
dmac->max -= 12;
- args->pushbuf = nvif_handle(&dmac->_push.mem.object);
+ args->pushbuf = nvif_handle(&dmac->push.mem.object);
ret = nv50_chan_create(device, disp, oclass, head, data, size,
&dmac->base);
@@ -558,7 +557,7 @@ nv50_dac_create(struct nouveau_encoder *nv_encoder)
{
struct drm_connector *connector = &nv_encoder->conn->base;
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
+ struct nvkm_i2c *i2c = nvxx_i2c(drm);
struct nvkm_i2c_bus *bus;
struct drm_encoder *encoder;
struct dcb_output *dcbe = nv_encoder->dcb;
@@ -593,8 +592,7 @@ static int
nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
bool *enabled, unsigned char *buf, int max_bytes)
{
- struct drm_device *drm_dev = dev_get_drvdata(kdev);
- struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct nouveau_drm *drm = dev_get_drvdata(kdev);
struct drm_encoder *encoder;
struct nouveau_encoder *nv_encoder;
struct nouveau_crtc *nv_crtc;
@@ -639,18 +637,17 @@ static int
nv50_audio_component_bind(struct device *kdev, struct device *hda_kdev,
void *data)
{
- struct drm_device *drm_dev = dev_get_drvdata(kdev);
- struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct nouveau_drm *drm = dev_get_drvdata(kdev);
struct drm_audio_component *acomp = data;
if (WARN_ON(!device_link_add(hda_kdev, kdev, DL_FLAG_STATELESS)))
return -ENOMEM;
- drm_modeset_lock_all(drm_dev);
+ drm_modeset_lock_all(drm->dev);
acomp->ops = &nv50_audio_component_ops;
acomp->dev = kdev;
drm->audio.component = acomp;
- drm_modeset_unlock_all(drm_dev);
+ drm_modeset_unlock_all(drm->dev);
return 0;
}
@@ -658,15 +655,14 @@ static void
nv50_audio_component_unbind(struct device *kdev, struct device *hda_kdev,
void *data)
{
- struct drm_device *drm_dev = dev_get_drvdata(kdev);
- struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct nouveau_drm *drm = dev_get_drvdata(kdev);
struct drm_audio_component *acomp = data;
- drm_modeset_lock_all(drm_dev);
+ drm_modeset_lock_all(drm->dev);
drm->audio.component = NULL;
acomp->ops = NULL;
acomp->dev = NULL;
- drm_modeset_unlock_all(drm_dev);
+ drm_modeset_unlock_all(drm->dev);
}
static const struct component_ops nv50_audio_component_bind_ops = {
@@ -1884,7 +1880,7 @@ nv50_sor_create(struct nouveau_encoder *nv_encoder)
struct drm_connector *connector = &nv_encoder->conn->base;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
+ struct nvkm_i2c *i2c = nvxx_i2c(drm);
struct drm_encoder *encoder;
struct dcb_output *dcbe = nv_encoder->dcb;
struct nv50_disp *disp = nv50_disp(connector->dev);
@@ -2051,7 +2047,7 @@ nv50_pior_create(struct nouveau_encoder *nv_encoder)
struct drm_device *dev = connector->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv50_disp *disp = nv50_disp(dev);
- struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
+ struct nvkm_i2c *i2c = nvxx_i2c(drm);
struct nvkm_i2c_bus *bus = NULL;
struct nvkm_i2c_aux *aux = NULL;
struct i2c_adapter *ddc;
@@ -2819,7 +2815,7 @@ nv50_display_destroy(struct drm_device *dev)
nouveau_bo_unmap(disp->sync);
if (disp->sync)
nouveau_bo_unpin(disp->sync);
- nouveau_bo_ref(NULL, &disp->sync);
+ nouveau_bo_fini(disp->sync);
nouveau_display(dev)->priv = NULL;
kfree(disp);
@@ -2862,7 +2858,7 @@ nv50_display_create(struct drm_device *dev)
nouveau_bo_unpin(disp->sync);
}
if (ret)
- nouveau_bo_ref(NULL, &disp->sync);
+ nouveau_bo_fini(disp->sync);
}
if (ret)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index 5508a7cfd492..15f9242b72ac 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -62,18 +62,11 @@ struct nv50_chan {
struct nv50_dmac {
struct nv50_chan base;
- struct nvif_push _push;
- struct nvif_push *push;
- u32 *ptr;
+ struct nvif_push push;
struct nvif_object sync;
struct nvif_object vram;
- /* Protects against concurrent pushbuf access to this channel, lock is
- * grabbed by evo_wait (if the pushbuf reservation is successful) and
- * dropped again by evo_kick. */
- struct mutex lock;
-
u32 cur;
u32 put;
u32 max;
@@ -95,7 +88,7 @@ struct nv50_outp_atom {
} set, clr;
};
-int nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
+int nv50_dmac_create(struct nouveau_drm *,
const s32 *oclass, u8 head, void *data, u32 size,
s64 syncbuf, struct nv50_dmac *dmac);
void nv50_dmac_destroy(struct nv50_dmac *);
@@ -108,9 +101,6 @@ void nv50_dmac_destroy(struct nv50_dmac *);
*/
struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder);
-u32 *evo_wait(struct nv50_dmac *, int nr);
-void evo_kick(u32 *, struct nv50_dmac *);
-
extern const u64 disp50xx_modifiers[];
extern const u64 disp90xx_modifiers[];
extern const u64 wndwc57e_modifiers[];
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head507d.c b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
index 0edd4e520c8e..7fa1e0279d7d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
@@ -29,7 +29,7 @@
int
head507d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -48,7 +48,7 @@ head507d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head507d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -66,7 +66,7 @@ head507d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head507d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 bounds = 0;
int ret;
@@ -94,7 +94,7 @@ head507d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head507d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 bounds = 0;
int ret;
@@ -122,7 +122,7 @@ head507d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
head507d_curs_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -139,7 +139,7 @@ head507d_curs_clr(struct nv50_head *head)
static int
head507d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -188,7 +188,7 @@ head507d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw,
int
head507d_core_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -202,7 +202,7 @@ head507d_core_clr(struct nv50_head *head)
static int
head507d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -278,7 +278,7 @@ head507d_core_calc(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
head507d_olut_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -293,7 +293,7 @@ head507d_olut_clr(struct nv50_head *head)
static int
head507d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -345,7 +345,7 @@ head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
int
head507d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
struct nv50_head_mode *m = &asyh->mode;
const int i = head->base.index;
int ret;
@@ -400,7 +400,7 @@ head507d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head507d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head827d.c b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
index 194d1771c481..1545d576fe9c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head827d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head827d.c
@@ -29,7 +29,7 @@
static int
head827d_curs_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -48,7 +48,7 @@ head827d_curs_clr(struct nv50_head *head)
static int
head827d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -73,7 +73,7 @@ head827d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
head827d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -110,7 +110,7 @@ head827d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
head827d_olut_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -127,7 +127,7 @@ head827d_olut_clr(struct nv50_head *head)
static int
head827d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
index 18fe4c1e2d6a..6c9e0438e55c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
@@ -36,7 +36,7 @@
int
head907d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -57,7 +57,7 @@ head907d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head907d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -77,7 +77,7 @@ head907d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
head907d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -95,7 +95,7 @@ head907d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head907d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 bounds = 0;
int ret;
@@ -124,7 +124,7 @@ head907d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
head907d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 bounds = 0;
int ret;
@@ -152,7 +152,7 @@ head907d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head907d_curs_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -171,7 +171,7 @@ head907d_curs_clr(struct nv50_head *head)
int
head907d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -195,7 +195,7 @@ head907d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head907d_core_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -209,7 +209,7 @@ head907d_core_clr(struct nv50_head *head)
int
head907d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -246,7 +246,7 @@ head907d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head907d_olut_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -263,7 +263,7 @@ head907d_olut_clr(struct nv50_head *head)
int
head907d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -322,7 +322,7 @@ bool head907d_ilut_check(int size)
int
head907d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
struct nv50_head_mode *m = &asyh->mode;
const int i = head->base.index;
int ret;
@@ -378,7 +378,7 @@ head907d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
int
head907d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
index 4ce47b55f72c..2d9aee050510 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head917d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c
@@ -30,7 +30,7 @@
static int
head917d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -48,7 +48,7 @@ head917d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
head917d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u32 bounds = 0;
int ret;
@@ -77,7 +77,7 @@ head917d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
head917d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
index a4a3b78ea42c..2bcb3790fc10 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
@@ -30,7 +30,7 @@
static int
headc37d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u8 depth;
int ret;
@@ -64,7 +64,7 @@ headc37d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
headc37d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -85,7 +85,7 @@ headc37d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
int
headc37d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -104,7 +104,7 @@ headc37d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
int
headc37d_curs_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -122,7 +122,7 @@ headc37d_curs_clr(struct nv50_head *head)
int
headc37d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -161,7 +161,7 @@ headc37d_curs_format(struct nv50_head *head, struct nv50_wndw_atom *asyw,
static int
headc37d_olut_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -175,7 +175,7 @@ headc37d_olut_clr(struct nv50_head *head)
static int
headc37d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -209,7 +209,7 @@ headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
static int
headc37d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
struct nv50_head_mode *m = &asyh->mode;
const int i = head->base.index;
int ret;
@@ -254,7 +254,7 @@ headc37d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
int
headc37d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
index 53b1248c40ec..fde4087e7691 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
@@ -30,7 +30,7 @@
static int
headc57d_display_id(struct nv50_head *head, u32 display_id)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -43,7 +43,7 @@ headc57d_display_id(struct nv50_head *head, u32 display_id)
static int
headc57d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
u8 depth;
int ret;
@@ -78,7 +78,7 @@ headc57d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
headc57d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -96,7 +96,7 @@ headc57d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
static int
headc57d_olut_clr(struct nv50_head *head)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -110,7 +110,7 @@ headc57d_olut_clr(struct nv50_head *head)
static int
headc57d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
const int i = head->base.index;
int ret;
@@ -201,7 +201,7 @@ headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size)
static int
headc57d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
- struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ struct nvif_push *push = &nv50_disp(head->base.base.dev)->core->chan.push;
struct nv50_head_mode *m = &asyh->mode;
const int i = head->base.index;
int ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
index 797c1e4e0eaa..654e506f8431 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
@@ -33,7 +33,7 @@
int
ovly507e_scale_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
@@ -55,7 +55,7 @@ ovly507e_scale_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
static int
ovly507e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 12)))
@@ -159,7 +159,7 @@ ovly507e_new_(const struct nv50_wndw_func *func, const u32 *format,
if (*pwndw = wndw, ret)
return ret;
- ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+ ret = nv50_dmac_create(drm,
&oclass, 0, &args, sizeof(args),
disp->sync->offset, &wndw->wndw);
if (ret) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
index 02dc02d9260f..a5ae22ed663d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
@@ -32,7 +32,7 @@
static int
ovly827e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 12)))
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
index 645130d18a99..8cf0e18fa596 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
@@ -29,7 +29,7 @@
static int
ovly907e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 12)))
diff --git a/drivers/gpu/drm/nouveau/dispnv50/pior507d.c b/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
index 17d230256bdd..79507d169778 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
@@ -30,7 +30,7 @@ static int
pior507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
int ret;
if (asyh) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sor507d.c b/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
index ca73d7710885..08cc9845322e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
@@ -30,7 +30,7 @@ static int
sor507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
int ret;
if (asyh) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sor907d.c b/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
index c86cd8fa61d6..23957cc8f326 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
@@ -32,7 +32,7 @@ static int
sor907d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c b/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
index 9eaef34816da..da05d4614e00 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
@@ -29,7 +29,7 @@ static int
sorc37d_ctrl(struct nv50_core *core, int or, u32 ctrl,
struct nv50_head_atom *asyh)
{
- struct nvif_push *push = core->chan.push;
+ struct nvif_push *push = &core->chan.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
index ee76b091d4ef..7985da61aaac 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
@@ -31,7 +31,7 @@
static int
wimmc37b_update(struct nv50_wndw *wndw, u32 *interlock)
{
- struct nvif_push *push = wndw->wimm.push;
+ struct nvif_push *push = &wndw->wimm.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -46,7 +46,7 @@ wimmc37b_update(struct nv50_wndw *wndw, u32 *interlock)
static int
wimmc37b_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wimm.push;
+ struct nvif_push *push = &wndw->wimm.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -71,10 +71,9 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
struct nvif_disp_chan_v0 args = {
.id = wndw->id,
};
- struct nv50_disp *disp = nv50_disp(drm->dev);
int ret;
- ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+ ret = nv50_dmac_create(drm,
&oclass, 0, &args, sizeof(args), -1,
&wndw->wimm);
if (ret) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
index b3deea5aca58..50a7b97d37a2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
@@ -39,7 +39,7 @@ wndwc37e_csc_clr(struct nv50_wndw *wndw)
static int
wndwc37e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 13)))
@@ -52,7 +52,7 @@ wndwc37e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
static int
wndwc37e_ilut_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -65,7 +65,7 @@ wndwc37e_ilut_clr(struct nv50_wndw *wndw)
static int
wndwc37e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
@@ -94,7 +94,7 @@ wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
int
wndwc37e_blend_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 8)))
@@ -139,7 +139,7 @@ wndwc37e_blend_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
int
wndwc37e_image_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
@@ -156,7 +156,7 @@ wndwc37e_image_clr(struct nv50_wndw *wndw)
static int
wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 17)))
@@ -209,7 +209,7 @@ wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
int
wndwc37e_ntfy_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -222,7 +222,7 @@ wndwc37e_ntfy_clr(struct nv50_wndw *wndw)
int
wndwc37e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 3)))
@@ -239,7 +239,7 @@ wndwc37e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
int
wndwc37e_sema_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -252,7 +252,7 @@ wndwc37e_sema_clr(struct nv50_wndw *wndw)
int
wndwc37e_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 5)))
@@ -268,7 +268,7 @@ wndwc37e_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
int
wndwc37e_update(struct nv50_wndw *wndw, u32 *interlock)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 5)))
@@ -363,7 +363,7 @@ wndwc37e_new_(const struct nv50_wndw_func *func, struct nouveau_drm *drm,
if (*pwndw = wndw, ret)
return ret;
- ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+ ret = nv50_dmac_create(drm,
&oclass, 0, &args, sizeof(args),
disp->sync->offset, &wndw->wndw);
if (ret) {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
index 1d214a4b960a..d1ca51aae58c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
@@ -32,7 +32,7 @@
static int
wndwc57e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 17)))
@@ -81,7 +81,7 @@ wndwc57e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
int
wndwc57e_csc_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
const u32 identity[12] = {
0x00010000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00010000, 0x00000000, 0x00000000,
@@ -99,7 +99,7 @@ wndwc57e_csc_clr(struct nv50_wndw *wndw)
int
wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 13)))
@@ -112,7 +112,7 @@ wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
int
wndwc57e_ilut_clr(struct nv50_wndw *wndw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 2)))
@@ -125,7 +125,7 @@ wndwc57e_ilut_clr(struct nv50_wndw *wndw)
int
wndwc57e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 4)))
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c
index 7a370fa1df20..52af293c98f4 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c
@@ -29,7 +29,7 @@
static int
wndwc67e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- struct nvif_push *push = wndw->wndw.push;
+ struct nvif_push *push = &wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 17)))
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index fa161b74d967..ea937fa7bc55 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -2,13 +2,6 @@
#ifndef __NVIF_CL0080_H__
#define __NVIF_CL0080_H__
-struct nv_device_v0 {
- __u8 version;
- __u8 priv;
- __u8 pad02[6];
- __u64 device; /* device identifier, ~0 for client default */
-};
-
#define NV_DEVICE_V0_INFO 0x00
#define NV_DEVICE_V0_TIME 0x01
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index e668ab1664f0..824e052dcc25 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -7,9 +7,6 @@
#define NVIF_CLASS_CONTROL /* if0001.h */ -0x00000001
-#define NVIF_CLASS_PERFMON /* if0002.h */ -0x00000002
-#define NVIF_CLASS_PERFDOM /* if0003.h */ -0x00000003
-
#define NVIF_CLASS_SW_NV04 /* if0004.h */ -0x00000004
#define NVIF_CLASS_SW_NV10 /* if0005.h */ -0x00000005
#define NVIF_CLASS_SW_NV50 /* if0005.h */ -0x00000006
diff --git a/drivers/gpu/drm/nouveau/include/nvif/client.h b/drivers/gpu/drm/nouveau/include/nvif/client.h
index 5d9395e651b6..03f1d564eb12 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/client.h
@@ -7,21 +7,12 @@
struct nvif_client {
struct nvif_object object;
const struct nvif_driver *driver;
- u64 version;
- u8 route;
};
-int nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
- struct nvif_client *);
+int nvif_client_ctor(struct nvif_client *parent, const char *name, struct nvif_client *);
void nvif_client_dtor(struct nvif_client *);
-int nvif_client_ioctl(struct nvif_client *, void *, u32);
int nvif_client_suspend(struct nvif_client *);
int nvif_client_resume(struct nvif_client *);
/*XXX*/
-#include <core/client.h>
-#define nvxx_client(a) ({ \
- struct nvif_client *_client = (a); \
- (struct nvkm_client *)_client->object.priv; \
-})
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index b0e59800a320..7877a2a79da9 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -18,41 +18,8 @@ struct nvif_device {
struct nvif_user user;
};
-int nvif_device_ctor(struct nvif_object *, const char *name, u32 handle,
- s32 oclass, void *, u32, struct nvif_device *);
+int nvif_device_ctor(struct nvif_client *, const char *name, struct nvif_device *);
void nvif_device_dtor(struct nvif_device *);
+int nvif_device_map(struct nvif_device *);
u64 nvif_device_time(struct nvif_device *);
-
-/*XXX*/
-#include <subdev/bios.h>
-#include <subdev/fb.h>
-#include <subdev/bar.h>
-#include <subdev/gpio.h>
-#include <subdev/clk.h>
-#include <subdev/i2c.h>
-#include <subdev/timer.h>
-#include <subdev/therm.h>
-#include <subdev/pci.h>
-
-#define nvxx_device(a) ({ \
- struct nvif_device *_device = (a); \
- struct { \
- struct nvkm_object object; \
- struct nvkm_device *device; \
- } *_udevice = _device->object.priv; \
- _udevice->device; \
-})
-#define nvxx_bios(a) nvxx_device(a)->bios
-#define nvxx_fb(a) nvxx_device(a)->fb
-#define nvxx_gpio(a) nvxx_device(a)->gpio
-#define nvxx_clk(a) nvxx_device(a)->clk
-#define nvxx_i2c(a) nvxx_device(a)->i2c
-#define nvxx_iccsense(a) nvxx_device(a)->iccsense
-#define nvxx_therm(a) nvxx_device(a)->therm
-#define nvxx_volt(a) nvxx_device(a)->volt
-
-#include <engine/fifo.h>
-#include <engine/gr.h>
-
-#define nvxx_gr(a) nvxx_device(a)->gr
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/driver.h b/drivers/gpu/drm/nouveau/include/nvif/driver.h
index 7a3af05f7f98..7b08ff769039 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/driver.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/driver.h
@@ -8,20 +8,15 @@ struct nvif_driver {
const char *name;
int (*init)(const char *name, u64 device, const char *cfg,
const char *dbg, void **priv);
- void (*fini)(void *priv);
int (*suspend)(void *priv);
int (*resume)(void *priv);
int (*ioctl)(void *priv, void *data, u32 size, void **hack);
void __iomem *(*map)(void *priv, u64 handle, u32 size);
void (*unmap)(void *priv, void __iomem *ptr, u32 size);
- bool keep;
};
int nvif_driver_init(const char *drv, const char *cfg, const char *dbg,
const char *name, u64 device, struct nvif_client *);
extern const struct nvif_driver nvif_driver_nvkm;
-extern const struct nvif_driver nvif_driver_drm;
-extern const struct nvif_driver nvif_driver_lib;
-extern const struct nvif_driver nvif_driver_null;
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0000.h b/drivers/gpu/drm/nouveau/include/nvif/if0000.h
index f7b8f8f48760..c06383835337 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0000.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0000.h
@@ -5,16 +5,6 @@
struct nvif_client_v0 {
__u8 version;
__u8 pad01[7];
- __u64 device;
char name[32];
};
-
-#define NVIF_CLIENT_V0_DEVLIST 0x00
-
-struct nvif_client_devlist_v0 {
- __u8 version;
- __u8 count;
- __u8 pad02[6];
- __u64 device[];
-};
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0002.h b/drivers/gpu/drm/nouveau/include/nvif/if0002.h
deleted file mode 100644
index df2915d6a61e..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvif/if0002.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVIF_IF0002_H__
-#define __NVIF_IF0002_H__
-
-#define NVIF_PERFMON_V0_QUERY_DOMAIN 0x00
-#define NVIF_PERFMON_V0_QUERY_SIGNAL 0x01
-#define NVIF_PERFMON_V0_QUERY_SOURCE 0x02
-
-struct nvif_perfmon_query_domain_v0 {
- __u8 version;
- __u8 id;
- __u8 counter_nr;
- __u8 iter;
- __u16 signal_nr;
- __u8 pad05[2];
- char name[64];
-};
-
-struct nvif_perfmon_query_signal_v0 {
- __u8 version;
- __u8 domain;
- __u16 iter;
- __u8 signal;
- __u8 source_nr;
- __u8 pad05[2];
- char name[64];
-};
-
-struct nvif_perfmon_query_source_v0 {
- __u8 version;
- __u8 domain;
- __u8 signal;
- __u8 iter;
- __u8 pad04[4];
- __u32 source;
- __u32 mask;
- char name[64];
-};
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0003.h b/drivers/gpu/drm/nouveau/include/nvif/if0003.h
deleted file mode 100644
index 78467da07c37..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvif/if0003.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVIF_IF0003_H__
-#define __NVIF_IF0003_H__
-
-struct nvif_perfdom_v0 {
- __u8 version;
- __u8 domain;
- __u8 mode;
- __u8 pad03[1];
- struct {
- __u8 signal[4];
- __u64 source[4][8];
- __u16 logic_op;
- } ctr[4];
-};
-
-#define NVIF_PERFDOM_V0_INIT 0x00
-#define NVIF_PERFDOM_V0_SAMPLE 0x01
-#define NVIF_PERFDOM_V0_READ 0x02
-
-struct nvif_perfdom_init {
-};
-
-struct nvif_perfdom_sample {
-};
-
-struct nvif_perfdom_read_v0 {
- __u8 version;
- __u8 pad01[7];
- __u32 ctr[4];
- __u32 clk;
- __u8 pad04[4];
-};
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
index 4e047bb1fc07..e825c8a1d9ca 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
@@ -2,17 +2,12 @@
#ifndef __NVIF_IOCTL_H__
#define __NVIF_IOCTL_H__
-#define NVIF_VERSION_LATEST 0x0000000000000100ULL
-
struct nvif_ioctl_v0 {
__u8 version;
-#define NVIF_IOCTL_V0_NOP 0x00
#define NVIF_IOCTL_V0_SCLASS 0x01
#define NVIF_IOCTL_V0_NEW 0x02
#define NVIF_IOCTL_V0_DEL 0x03
#define NVIF_IOCTL_V0_MTHD 0x04
-#define NVIF_IOCTL_V0_RD 0x05
-#define NVIF_IOCTL_V0_WR 0x06
#define NVIF_IOCTL_V0_MAP 0x07
#define NVIF_IOCTL_V0_UNMAP 0x08
__u8 type;
@@ -28,10 +23,6 @@ struct nvif_ioctl_v0 {
__u8 data[]; /* ioctl data (below) */
};
-struct nvif_ioctl_nop_v0 {
- __u64 version;
-};
-
struct nvif_ioctl_sclass_v0 {
/* nvif_ioctl ... */
__u8 version;
@@ -67,24 +58,6 @@ struct nvif_ioctl_mthd_v0 {
__u8 data[]; /* method data (class.h) */
};
-struct nvif_ioctl_rd_v0 {
- /* nvif_ioctl ... */
- __u8 version;
- __u8 size;
- __u8 pad02[2];
- __u32 data;
- __u64 addr;
-};
-
-struct nvif_ioctl_wr_v0 {
- /* nvif_ioctl ... */
- __u8 version;
- __u8 size;
- __u8 pad02[2];
- __u32 data;
- __u64 addr;
-};
-
struct nvif_ioctl_map_v0 {
/* nvif_ioctl ... */
__u8 version;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index f52399caee82..8d205b6af46a 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -34,8 +34,6 @@ void nvif_object_dtor(struct nvif_object *);
int nvif_object_ioctl(struct nvif_object *, void *, u32, void **);
int nvif_object_sclass_get(struct nvif_object *, struct nvif_sclass **);
void nvif_object_sclass_put(struct nvif_sclass **);
-u32 nvif_object_rd(struct nvif_object *, int, u64);
-void nvif_object_wr(struct nvif_object *, int, u64, u32);
int nvif_object_mthd(struct nvif_object *, u32, void *, u32);
int nvif_object_map_handle(struct nvif_object *, void *, u32,
u64 *handle, u64 *length);
@@ -47,20 +45,11 @@ void nvif_object_unmap(struct nvif_object *);
#define nvif_object(a) (a)->object
#define nvif_rd(a,f,b,c) ({ \
- struct nvif_object *_object = (a); \
- u32 _data; \
- if (likely(_object->map.ptr)) \
- _data = f((u8 __iomem *)_object->map.ptr + (c)); \
- else \
- _data = nvif_object_rd(_object, (b), (c)); \
+ u32 _data = f((u8 __iomem *)(a)->map.ptr + (c)); \
_data; \
})
#define nvif_wr(a,f,b,c,d) ({ \
- struct nvif_object *_object = (a); \
- if (likely(_object->map.ptr)) \
- f((d), (u8 __iomem *)_object->map.ptr + (c)); \
- else \
- nvif_object_wr(_object, (b), (c), (d)); \
+ f((d), (u8 __iomem *)(a)->map.ptr + (c)); \
})
#define nvif_rd08(a,b) ({ ((u8)nvif_rd((a), ioread8, 1, (b))); })
#define nvif_rd16(a,b) ({ ((u16)nvif_rd((a), ioread16_native, 2, (b))); })
@@ -69,7 +58,7 @@ void nvif_object_unmap(struct nvif_object *);
#define nvif_wr16(a,b,c) nvif_wr((a), iowrite16_native, 2, (b), (u16)(c))
#define nvif_wr32(a,b,c) nvif_wr((a), iowrite32_native, 4, (b), (u32)(c))
#define nvif_mask(a,b,c,d) ({ \
- struct nvif_object *__object = (a); \
+ typeof(a) __object = (a); \
u32 _addr = (b), _data = nvif_rd32(__object, _addr); \
nvif_wr32(__object, _addr, (_data & ~(c)) | (d)); \
_data; \
@@ -134,11 +123,4 @@ struct nvif_mclass {
#define NVIF_MR32(p,A...) DRF_MR(NVIF_RD32_, NVIF_WR32_, u32, (p), 0, ##A)
#define NVIF_MV32(p,A...) DRF_MV(NVIF_RD32_, NVIF_WR32_, u32, (p), 0, ##A)
#define NVIF_MD32(p,A...) DRF_MD(NVIF_RD32_, NVIF_WR32_, u32, (p), 0, ##A)
-
-/*XXX*/
-#include <core/object.h>
-#define nvxx_object(a) ({ \
- struct nvif_object *_object = (a); \
- (struct nvkm_object *)_object->priv; \
-})
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/os.h b/drivers/gpu/drm/nouveau/include/nvif/os.h
index 429d0106c123..a2eaf3929ac3 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/os.h
@@ -34,4 +34,23 @@
#include <soc/tegra/fuse.h>
#include <soc/tegra/pmc.h>
+
+#ifdef __BIG_ENDIAN
+#define ioread16_native ioread16be
+#define iowrite16_native iowrite16be
+#define ioread32_native ioread32be
+#define iowrite32_native iowrite32be
+#else
+#define ioread16_native ioread16
+#define iowrite16_native iowrite16
+#define ioread32_native ioread32
+#define iowrite32_native iowrite32
+#endif
+
+#define iowrite64_native(v,p) do { \
+ u32 __iomem *_p = (u32 __iomem *)(p); \
+ u64 _v = (v); \
+ iowrite32_native(lower_32_bits(_v), &_p[0]); \
+ iowrite32_native(upper_32_bits(_v), &_p[1]); \
+} while(0)
#endif
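
The hunk above moves the *_native I/O accessor macros from nvkm/core/os.h into nvif/os.h; iowrite64_native() is composed of two native-endian 32-bit writes, lower word at the lower address. Below is a minimal userspace sketch of that split-write pattern, not part of the nouveau code; split_write64(), main() and the buffer are invented purely for illustration.

/*
 * Hedged illustration (not the kernel macros): store a 64-bit value as two
 * native-endian 32-bit words, lower word first, mirroring the structure of
 * the iowrite64_native() helper added above.
 */
#include <stdint.h>
#include <stdio.h>

static void split_write64(uint32_t *p, uint64_t v)
{
	p[0] = (uint32_t)(v & 0xffffffffu);	/* lower_32_bits() */
	p[1] = (uint32_t)(v >> 32);		/* upper_32_bits() */
}

int main(void)
{
	uint32_t buf[2] = { 0, 0 };
	uint64_t v = 0x1122334455667788ull;
	uint64_t check;

	split_write64(buf, v);

	/* Reassemble the two words and verify the round trip. */
	check = (uint64_t)buf[1] << 32 | buf[0];
	printf("wrote 0x%016llx, read back 0x%016llx\n",
	       (unsigned long long)v, (unsigned long long)check);
	return check == v ? 0 : 1;
}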
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
index 932c9fd0b2d8..15f27fdd877a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
@@ -22,7 +22,6 @@ struct nvkm_client {
int nvkm_client_new(const char *name, u64 device, const char *cfg, const char *dbg,
int (*)(u64, void *, u32), struct nvkm_client **);
-struct nvkm_client *nvkm_client_search(struct nvkm_client *, u64 handle);
/* logging for client-facing objects */
#define nvif_printk(o,l,p,f,a...) do { \
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index f057d348221e..46afb877a296 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -109,7 +109,6 @@ struct nvkm_device_chip {
};
struct nvkm_device *nvkm_device_find(u64 name);
-int nvkm_device_list(u64 *name, int size);
/* privileged register interface accessor macros */
#define nvkm_rd08(d,a) ioread8((d)->pri + (a))
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
index 30c17db483cb..9d2a1abf64f9 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
@@ -46,7 +46,6 @@ NVKM_LAYOUT_INST(NVKM_ENGINE_NVDEC , struct nvkm_nvdec , nvdec, 8)
NVKM_LAYOUT_INST(NVKM_ENGINE_NVENC , struct nvkm_nvenc , nvenc, 3)
NVKM_LAYOUT_INST(NVKM_ENGINE_NVJPG , struct nvkm_engine , nvjpg, 8)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_OFA , struct nvkm_engine , ofa)
-NVKM_LAYOUT_ONCE(NVKM_ENGINE_PM , struct nvkm_pm , pm)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC , struct nvkm_engine , sec)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC2 , struct nvkm_sec2 , sec2)
NVKM_LAYOUT_ONCE(NVKM_ENGINE_SW , struct nvkm_sw , sw)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
index ed1f66360782..10107ef3ca49 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
@@ -15,8 +15,6 @@ struct nvkm_object {
struct list_head head;
struct list_head tree;
- u8 route;
- u64 token;
u64 object;
struct rb_node node;
};
@@ -35,12 +33,6 @@ struct nvkm_object_func {
int (*map)(struct nvkm_object *, void *argv, u32 argc,
enum nvkm_object_map *, u64 *addr, u64 *size);
int (*unmap)(struct nvkm_object *);
- int (*rd08)(struct nvkm_object *, u64 addr, u8 *data);
- int (*rd16)(struct nvkm_object *, u64 addr, u16 *data);
- int (*rd32)(struct nvkm_object *, u64 addr, u32 *data);
- int (*wr08)(struct nvkm_object *, u64 addr, u8 data);
- int (*wr16)(struct nvkm_object *, u64 addr, u16 data);
- int (*wr32)(struct nvkm_object *, u64 addr, u32 data);
int (*bind)(struct nvkm_object *, struct nvkm_gpuobj *, int align,
struct nvkm_gpuobj **);
int (*sclass)(struct nvkm_object *, int index, struct nvkm_oclass *);
@@ -63,12 +55,6 @@ int nvkm_object_ntfy(struct nvkm_object *, u32 mthd, struct nvkm_event **);
int nvkm_object_map(struct nvkm_object *, void *argv, u32 argc,
enum nvkm_object_map *, u64 *addr, u64 *size);
int nvkm_object_unmap(struct nvkm_object *);
-int nvkm_object_rd08(struct nvkm_object *, u64 addr, u8 *data);
-int nvkm_object_rd16(struct nvkm_object *, u64 addr, u16 *data);
-int nvkm_object_rd32(struct nvkm_object *, u64 addr, u32 *data);
-int nvkm_object_wr08(struct nvkm_object *, u64 addr, u8 data);
-int nvkm_object_wr16(struct nvkm_object *, u64 addr, u16 data);
-int nvkm_object_wr32(struct nvkm_object *, u64 addr, u32 data);
int nvkm_object_bind(struct nvkm_object *, struct nvkm_gpuobj *, int align,
struct nvkm_gpuobj **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h b/drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h
index 8e1b945d38f3..cad05f0e7948 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h
@@ -21,8 +21,6 @@ struct nvkm_oclass {
const void *priv;
const void *engn;
u32 handle;
- u8 route;
- u64 token;
u64 object;
struct nvkm_client *client;
struct nvkm_object *parent;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
index 3fd5c007a663..9b05612e6490 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
@@ -3,25 +3,6 @@
#define __NVKM_OS_H__
#include <nvif/os.h>
-#ifdef __BIG_ENDIAN
-#define ioread16_native ioread16be
-#define iowrite16_native iowrite16be
-#define ioread32_native ioread32be
-#define iowrite32_native iowrite32be
-#else
-#define ioread16_native ioread16
-#define iowrite16_native iowrite16
-#define ioread32_native ioread32
-#define iowrite32_native iowrite32
-#endif
-
-#define iowrite64_native(v,p) do { \
- u32 __iomem *_p = (u32 __iomem *)(p); \
- u64 _v = (v); \
- iowrite32_native(lower_32_bits(_v), &_p[0]); \
- iowrite32_native(upper_32_bits(_v), &_p[1]); \
-} while(0)
-
struct nvkm_blob {
void *data;
u32 size;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
index b4b5df3e1610..7444c4d59e09 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
@@ -10,6 +10,5 @@ struct nvkm_device_pci {
};
int nvkm_device_pci_new(struct pci_dev *, const char *cfg, const char *dbg,
- bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
index ccee53d4e4ec..22f74fc88cd7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -51,6 +51,5 @@ struct nvkm_device_tegra_func {
int nvkm_device_tegra_new(const struct nvkm_device_tegra_func *,
struct platform_device *,
const char *cfg, const char *dbg,
- bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
deleted file mode 100644
index af89d46ea360..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_PM_H__
-#define __NVKM_PM_H__
-#include <core/engine.h>
-
-struct nvkm_pm {
- const struct nvkm_pm_func *func;
- struct nvkm_engine engine;
-
- struct {
- spinlock_t lock;
- struct nvkm_object *object;
- } client;
-
- struct list_head domains;
- struct list_head sources;
- u32 sequence;
-};
-
-int nv40_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-int nv50_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-int g84_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-int gt200_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-int gt215_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-int gf100_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-int gf108_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-int gf117_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-int gk104_pm_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pm **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index d56909071de6..2a0617e5fe2a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -46,23 +46,9 @@ nouveau_abi16(struct drm_file *file_priv)
struct nouveau_abi16 *abi16;
cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
if (cli->abi16) {
- struct nv_device_v0 args = {
- .device = ~0ULL,
- };
-
+ abi16->cli = cli;
INIT_LIST_HEAD(&abi16->channels);
-
- /* allocate device object targeting client's default
- * device (ie. the one that belongs to the fd it
- * opened)
- */
- if (nvif_device_ctor(&cli->base.object, "abi16Device",
- 0, NV_DEVICE, &args, sizeof(args),
- &abi16->device) == 0)
- return cli->abi16;
-
- kfree(cli->abi16);
- cli->abi16 = NULL;
+ INIT_LIST_HEAD(&abi16->objects);
}
}
return cli->abi16;
@@ -82,11 +68,72 @@ nouveau_abi16_get(struct drm_file *file_priv)
int
nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
{
- struct nouveau_cli *cli = (void *)abi16->device.object.client;
+ struct nouveau_cli *cli = abi16->cli;
mutex_unlock(&cli->mutex);
return ret;
}
+/* Tracks objects created via the DRM_NOUVEAU_NVIF ioctl.
+ *
+ * The only two types of object that userspace ever allocated via this
+ * interface are 'device', in order to retrieve basic device info, and
+ * 'engine objects', which instantiate HW classes on a channel.
+ *
+ * The remainder of what used to be available via DRM_NOUVEAU_NVIF has
+ * been removed, but these object types need to be tracked to maintain
+ * compatibility with userspace.
+ */
+struct nouveau_abi16_obj {
+ enum nouveau_abi16_obj_type {
+ DEVICE,
+ ENGOBJ,
+ } type;
+ u64 object;
+
+ struct nvif_object engobj;
+
+ struct list_head head; /* protected by nouveau_abi16.cli.mutex */
+};
+
+static struct nouveau_abi16_obj *
+nouveau_abi16_obj_find(struct nouveau_abi16 *abi16, u64 object)
+{
+ struct nouveau_abi16_obj *obj;
+
+ list_for_each_entry(obj, &abi16->objects, head) {
+ if (obj->object == object)
+ return obj;
+ }
+
+ return NULL;
+}
+
+static void
+nouveau_abi16_obj_del(struct nouveau_abi16_obj *obj)
+{
+ list_del(&obj->head);
+ kfree(obj);
+}
+
+static struct nouveau_abi16_obj *
+nouveau_abi16_obj_new(struct nouveau_abi16 *abi16, enum nouveau_abi16_obj_type type, u64 object)
+{
+ struct nouveau_abi16_obj *obj;
+
+ obj = nouveau_abi16_obj_find(abi16, object);
+ if (obj)
+ return ERR_PTR(-EEXIST);
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ obj->type = type;
+ obj->object = object;
+ list_add_tail(&obj->head, &abi16->objects);
+ return obj;
+}
+
s32
nouveau_abi16_swclass(struct nouveau_drm *drm)
{
@@ -164,17 +211,20 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
void
nouveau_abi16_fini(struct nouveau_abi16 *abi16)
{
- struct nouveau_cli *cli = (void *)abi16->device.object.client;
+ struct nouveau_cli *cli = abi16->cli;
struct nouveau_abi16_chan *chan, *temp;
+ struct nouveau_abi16_obj *obj, *tmp;
+
+ /* cleanup objects */
+ list_for_each_entry_safe(obj, tmp, &abi16->objects, head) {
+ nouveau_abi16_obj_del(obj);
+ }
/* cleanup channels */
list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
nouveau_abi16_chan_fini(abi16, chan);
}
- /* destroy the device object */
- nvif_device_dtor(&abi16->device);
-
kfree(cli->abi16);
cli->abi16 = NULL;
}
@@ -199,8 +249,8 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->client.device;
- struct nvkm_device *nvkm_device = nvxx_device(&drm->client.device);
- struct nvkm_gr *gr = nvxx_gr(device);
+ struct nvkm_device *nvkm_device = nvxx_device(drm);
+ struct nvkm_gr *gr = nvxx_gr(drm);
struct drm_nouveau_getparam *getparam = data;
struct pci_dev *pdev = to_pci_dev(dev->dev);
@@ -291,7 +341,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
struct nouveau_abi16_chan *chan;
- struct nvif_device *device;
+ struct nvif_device *device = &cli->device;
u64 engine, runm;
int ret;
@@ -308,7 +358,6 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
*/
__nouveau_cli_disable_uvmm_noinit(cli);
- device = &abi16->device;
engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;
/* hack to allow channel engine type specification on kepler */
@@ -356,7 +405,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
list_add(&chan->head, &abi16->channels);
/* create channel object and initialise dma and fence management */
- ret = nouveau_channel_new(drm, device, false, runm, init->fb_ctxdma_handle,
+ ret = nouveau_channel_new(cli, false, runm, init->fb_ctxdma_handle,
init->tt_ctxdma_handle, &chan->chan);
if (ret)
goto done;
@@ -458,44 +507,6 @@ nouveau_abi16_chan(struct nouveau_abi16 *abi16, int channel)
}
int
-nouveau_abi16_usif(struct drm_file *file_priv, void *data, u32 size)
-{
- union {
- struct nvif_ioctl_v0 v0;
- } *args = data;
- struct nouveau_abi16_chan *chan;
- struct nouveau_abi16 *abi16;
- int ret = -ENOSYS;
-
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
- switch (args->v0.type) {
- case NVIF_IOCTL_V0_NEW:
- case NVIF_IOCTL_V0_MTHD:
- case NVIF_IOCTL_V0_SCLASS:
- break;
- default:
- return -EACCES;
- }
- } else
- return ret;
-
- if (!(abi16 = nouveau_abi16(file_priv)))
- return -ENOMEM;
-
- if (args->v0.token != ~0ULL) {
- if (!(chan = nouveau_abi16_chan(abi16, args->v0.token)))
- return -EINVAL;
- args->v0.object = nvif_handle(&chan->chan->user);
- args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
- return 0;
- }
-
- args->v0.object = nvif_handle(&abi16->device.object);
- args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
- return 0;
-}
-
-int
nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_channel_free *req = data;
@@ -519,7 +530,6 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
struct nouveau_abi16_chan *chan;
struct nouveau_abi16_ntfy *ntfy;
- struct nvif_client *client;
struct nvif_sclass *sclass;
s32 oclass = 0;
int ret, i;
@@ -529,7 +539,6 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
if (init->handle == ~0)
return nouveau_abi16_put(abi16, -EINVAL);
- client = abi16->device.object.client;
chan = nouveau_abi16_chan(abi16, init->channel);
if (!chan)
@@ -594,10 +603,8 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
list_add(&ntfy->head, &chan->notifiers);
- client->route = NVDRM_OBJECT_ABI16;
ret = nvif_object_ctor(&chan->chan->user, "abi16EngObj", init->handle,
oclass, NULL, 0, &ntfy->object);
- client->route = NVDRM_OBJECT_NVIF;
if (ret)
nouveau_abi16_ntfy_fini(chan, ntfy);
@@ -612,18 +619,17 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
struct nouveau_abi16_chan *chan;
struct nouveau_abi16_ntfy *ntfy;
- struct nvif_device *device = &abi16->device;
- struct nvif_client *client;
+ struct nvif_device *device;
struct nv_dma_v0 args = {};
int ret;
if (unlikely(!abi16))
return -ENOMEM;
+ device = &abi16->cli->device;
/* completely unnecessary for these chipsets... */
if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI))
return nouveau_abi16_put(abi16, -EINVAL);
- client = abi16->device.object.client;
chan = nouveau_abi16_chan(abi16, info->channel);
if (!chan)
@@ -660,11 +666,9 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
args.limit += chan->ntfy->offset;
}
- client->route = NVDRM_OBJECT_ABI16;
ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle,
NV_DMA_IN_MEMORY, &args, sizeof(args),
&ntfy->object);
- client->route = NVDRM_OBJECT_NVIF;
if (ret)
goto done;
@@ -704,3 +708,183 @@ nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
return nouveau_abi16_put(abi16, ret);
}
+
+static int
+nouveau_abi16_ioctl_mthd(struct nouveau_abi16 *abi16, struct nvif_ioctl_v0 *ioctl, u32 argc)
+{
+ struct nouveau_cli *cli = abi16->cli;
+ struct nvif_ioctl_mthd_v0 *args;
+ struct nouveau_abi16_obj *obj;
+ struct nv_device_info_v0 *info;
+
+ if (ioctl->route || argc < sizeof(*args))
+ return -EINVAL;
+ args = (void *)ioctl->data;
+ argc -= sizeof(*args);
+
+ obj = nouveau_abi16_obj_find(abi16, ioctl->object);
+ if (!obj || obj->type != DEVICE)
+ return -EINVAL;
+
+ if (args->method != NV_DEVICE_V0_INFO ||
+ argc != sizeof(*info))
+ return -EINVAL;
+
+ info = (void *)args->data;
+ if (info->version != 0x00)
+ return -EINVAL;
+
+ info = &cli->device.info;
+ memcpy(args->data, info, sizeof(*info));
+ return 0;
+}
+
+static int
+nouveau_abi16_ioctl_del(struct nouveau_abi16 *abi16, struct nvif_ioctl_v0 *ioctl, u32 argc)
+{
+ struct nouveau_abi16_obj *obj;
+
+ if (ioctl->route || argc)
+ return -EINVAL;
+
+ obj = nouveau_abi16_obj_find(abi16, ioctl->object);
+ if (obj) {
+ if (obj->type == ENGOBJ)
+ nvif_object_dtor(&obj->engobj);
+ nouveau_abi16_obj_del(obj);
+ }
+
+ return 0;
+}
+
+static int
+nouveau_abi16_ioctl_new(struct nouveau_abi16 *abi16, struct nvif_ioctl_v0 *ioctl, u32 argc)
+{
+ struct nvif_ioctl_new_v0 *args;
+ struct nouveau_abi16_chan *chan;
+ struct nouveau_abi16_obj *obj;
+ int ret;
+
+ if (argc < sizeof(*args))
+ return -EINVAL;
+ args = (void *)ioctl->data;
+ argc -= sizeof(*args);
+
+ if (args->version != 0)
+ return -EINVAL;
+
+ if (!ioctl->route) {
+ if (ioctl->object || args->oclass != NV_DEVICE)
+ return -EINVAL;
+
+ obj = nouveau_abi16_obj_new(abi16, DEVICE, args->object);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ return 0;
+ }
+
+ chan = nouveau_abi16_chan(abi16, ioctl->token);
+ if (!chan)
+ return -EINVAL;
+
+ obj = nouveau_abi16_obj_new(abi16, ENGOBJ, args->object);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ ret = nvif_object_ctor(&chan->chan->user, "abi16EngObj", args->handle, args->oclass,
+ NULL, 0, &obj->engobj);
+ if (ret)
+ nouveau_abi16_obj_del(obj);
+
+ return ret;
+}
+
+static int
+nouveau_abi16_ioctl_sclass(struct nouveau_abi16 *abi16, struct nvif_ioctl_v0 *ioctl, u32 argc)
+{
+ struct nvif_ioctl_sclass_v0 *args;
+ struct nouveau_abi16_chan *chan;
+ struct nvif_sclass *sclass;
+ int ret;
+
+ if (!ioctl->route || argc < sizeof(*args))
+ return -EINVAL;
+ args = (void *)ioctl->data;
+ argc -= sizeof(*args);
+
+ if (argc != args->count * sizeof(args->oclass[0]))
+ return -EINVAL;
+
+ chan = nouveau_abi16_chan(abi16, ioctl->token);
+ if (!chan)
+ return -EINVAL;
+
+ ret = nvif_object_sclass_get(&chan->chan->user, &sclass);
+ if (ret < 0)
+ return ret;
+
+ for (int i = 0; i < min_t(u8, args->count, ret); i++) {
+ args->oclass[i].oclass = sclass[i].oclass;
+ args->oclass[i].minver = sclass[i].minver;
+ args->oclass[i].maxver = sclass[i].maxver;
+ }
+ args->count = ret;
+
+ nvif_object_sclass_put(&sclass);
+ return 0;
+}
+
+int
+nouveau_abi16_ioctl(struct drm_file *filp, void __user *user, u32 size)
+{
+ struct nvif_ioctl_v0 *ioctl;
+ struct nouveau_abi16 *abi16;
+ u32 argc = size;
+ int ret;
+
+ if (argc < sizeof(*ioctl))
+ return -EINVAL;
+ argc -= sizeof(*ioctl);
+
+ ioctl = kmalloc(size, GFP_KERNEL);
+ if (!ioctl)
+ return -ENOMEM;
+
+ ret = -EFAULT;
+ if (copy_from_user(ioctl, user, size))
+ goto done_free;
+
+ if (ioctl->version != 0x00 ||
+ (ioctl->route && ioctl->route != 0xff)) {
+ ret = -EINVAL;
+ goto done_free;
+ }
+
+ abi16 = nouveau_abi16_get(filp);
+ if (unlikely(!abi16)) {
+ ret = -ENOMEM;
+ goto done_free;
+ }
+
+ switch (ioctl->type) {
+ case NVIF_IOCTL_V0_SCLASS: ret = nouveau_abi16_ioctl_sclass(abi16, ioctl, argc); break;
+ case NVIF_IOCTL_V0_NEW : ret = nouveau_abi16_ioctl_new (abi16, ioctl, argc); break;
+ case NVIF_IOCTL_V0_DEL : ret = nouveau_abi16_ioctl_del (abi16, ioctl, argc); break;
+ case NVIF_IOCTL_V0_MTHD : ret = nouveau_abi16_ioctl_mthd (abi16, ioctl, argc); break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ nouveau_abi16_put(abi16, 0);
+
+ if (ret == 0) {
+ if (copy_to_user(user, ioctl, size))
+ ret = -EFAULT;
+ }
+
+done_free:
+ kfree(ioctl);
+ return ret;
+}
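
The comment added to nouveau_abi16.c above notes that only 'device' and 'engine object' allocations remain serviceable on the DRM_NOUVEAU_NVIF path, tracked per-client on abi16->objects. The following is a simplified, userspace-compilable sketch of that find/new/del tracking, using a plain singly-linked list in place of the kernel's list_head; all names here are invented for illustration and are not the driver's API.

/*
 * Hedged sketch of the DEVICE/ENGOBJ bookkeeping: look up by the
 * userspace-chosen identifier, reject duplicates, allow deletion.
 */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

enum obj_type { OBJ_DEVICE, OBJ_ENGOBJ };

struct tracked_obj {
	enum obj_type type;
	uint64_t object;		/* identifier chosen by userspace */
	struct tracked_obj *next;
};

static struct tracked_obj *obj_find(struct tracked_obj *head, uint64_t object)
{
	for (; head; head = head->next)
		if (head->object == object)
			return head;
	return NULL;
}

static int obj_new(struct tracked_obj **head, enum obj_type type, uint64_t object)
{
	struct tracked_obj *obj;

	if (obj_find(*head, object))
		return -EEXIST;		/* duplicate identifiers are rejected */

	obj = calloc(1, sizeof(*obj));
	if (!obj)
		return -ENOMEM;

	obj->type = type;
	obj->object = object;
	obj->next = *head;
	*head = obj;
	return 0;
}

static void obj_del(struct tracked_obj **head, uint64_t object)
{
	for (struct tracked_obj **pp = head; *pp; pp = &(*pp)->next) {
		if ((*pp)->object == object) {
			struct tracked_obj *victim = *pp;
			*pp = victim->next;
			free(victim);
			return;
		}
	}
}

int main(void)
{
	struct tracked_obj *objects = NULL;

	obj_new(&objects, OBJ_DEVICE, 0x1234);
	obj_new(&objects, OBJ_ENGOBJ, 0x5678);
	obj_del(&objects, 0x1234);
	return obj_find(objects, 0x5678) ? 0 : 1;
}

As in the kernel version, a duplicate identifier is rejected with -EEXIST rather than silently replacing the existing entry.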
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 661b901d8ecc..af6b4e1cefd2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -30,16 +30,16 @@ struct nouveau_abi16_chan {
};
struct nouveau_abi16 {
- struct nvif_device device;
+ struct nouveau_cli *cli;
struct list_head channels;
- u64 handles;
+ struct list_head objects;
};
struct nouveau_abi16 *nouveau_abi16_get(struct drm_file *);
int nouveau_abi16_put(struct nouveau_abi16 *, int);
void nouveau_abi16_fini(struct nouveau_abi16 *);
s32 nouveau_abi16_swclass(struct nouveau_drm *);
-int nouveau_abi16_usif(struct drm_file *, void *data, u32 size);
+int nouveau_abi16_ioctl(struct drm_file *, void __user *user, u32 size);
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 8c3c1f1e01c5..c8335f5b49db 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -2015,7 +2015,7 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
static bool NVInitVBIOS(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
+ struct nvkm_bios *bios = nvxx_bios(drm);
struct nvbios *legacy = &drm->vbios;
memset(legacy, 0, sizeof(struct nvbios));
@@ -2086,7 +2086,7 @@ nouveau_bios_init(struct drm_device *dev)
/* only relevant for PCI devices */
if (!dev_is_pci(dev->dev) ||
- nvkm_gsp_rm(nvxx_device(&drm->client.device)->gsp))
+ nvkm_gsp_rm(nvxx_device(drm)->gsp))
return 0;
if (!NVInitVBIOS(dev))
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 18eb061ccafb..62b5f5889041 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -48,6 +48,7 @@ struct bit_entry {
int bit_table(struct drm_device *, u8 id, struct bit_entry *);
+#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/conn.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 0712d0b15170..db961eade225 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -58,7 +58,7 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
{
struct nouveau_drm *drm = nouveau_drm(dev);
int i = reg - drm->tile.reg;
- struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
+ struct nvkm_fb *fb = nvxx_fb(drm);
struct nvkm_fb_tile *tile = &fb->tile.region[i];
nouveau_fence_unref(&reg->fence);
@@ -109,7 +109,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
u32 size, u32 pitch, u32 zeta)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
+ struct nvkm_fb *fb = nvxx_fb(drm);
struct nouveau_drm_tile *tile, *found = NULL;
int i;
@@ -859,7 +859,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_channel *chan = drm->ttm.chan;
- struct nouveau_cli *cli = (void *)chan->user.client;
+ struct nouveau_cli *cli = chan->cli;
struct nouveau_fence *fence;
int ret;
@@ -898,7 +898,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
* Without this the operation can timeout and we'll fallback to a
* software copy, which might take several minutes to finish.
*/
- nouveau_fence_wait(fence, false);
+ nouveau_fence_wait(fence, false, false);
ret = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false,
new_reg);
nouveau_fence_unref(&fence);
@@ -1171,7 +1171,7 @@ static int
nouveau_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *reg)
{
struct nouveau_drm *drm = nouveau_bdev(bdev);
- struct nvkm_device *device = nvxx_device(&drm->client.device);
+ struct nvkm_device *device = nvxx_device(drm);
struct nouveau_mem *mem = nouveau_mem(reg);
struct nvif_mmu *mmu = &drm->client.mmu;
int ret;
@@ -1291,7 +1291,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nvkm_device *device = nvxx_device(&drm->client.device);
+ struct nvkm_device *device = nvxx_device(drm);
u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
int i, ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 4e891752c255..596a63a50a20 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -53,25 +53,10 @@ nouveau_bo(struct ttm_buffer_object *bo)
return container_of(bo, struct nouveau_bo, bo);
}
-static inline int
-nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
+static inline void
+nouveau_bo_fini(struct nouveau_bo *bo)
{
- struct nouveau_bo *prev;
-
- if (!pnvbo)
- return -EINVAL;
- prev = *pnvbo;
-
- if (ref) {
- ttm_bo_get(&ref->bo);
- *pnvbo = nouveau_bo(&ref->bo);
- } else {
- *pnvbo = NULL;
- }
- if (prev)
- ttm_bo_put(&prev->bo);
-
- return 0;
+ ttm_bo_put(&bo->bo);
}
extern struct ttm_device_funcs nouveau_bo_driver;
@@ -115,35 +100,6 @@ nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
return ioptr;
}
-static inline void
-nouveau_bo_unmap_unpin_unref(struct nouveau_bo **pnvbo)
-{
- if (*pnvbo) {
- nouveau_bo_unmap(*pnvbo);
- nouveau_bo_unpin(*pnvbo);
- nouveau_bo_ref(NULL, pnvbo);
- }
-}
-
-static inline int
-nouveau_bo_new_pin_map(struct nouveau_cli *cli, u64 size, int align, u32 domain,
- struct nouveau_bo **pnvbo)
-{
- int ret = nouveau_bo_new(cli, size, align, domain,
- 0, 0, NULL, NULL, pnvbo);
- if (ret == 0) {
- ret = nouveau_bo_pin(*pnvbo, domain, true);
- if (ret == 0) {
- ret = nouveau_bo_map(*pnvbo);
- if (ret == 0)
- return ret;
- nouveau_bo_unpin(*pnvbo);
- }
- nouveau_bo_ref(NULL, pnvbo);
- }
- return ret;
-}
-
int nv04_bo_move_init(struct nouveau_channel *, u32);
int nv04_bo_move_m2mf(struct nouveau_channel *, struct ttm_buffer_object *,
struct ttm_resource *, struct ttm_resource *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo0039.c b/drivers/gpu/drm/nouveau/nouveau_bo0039.c
index e2ce44adaa5c..0b6758e024a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo0039.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo0039.c
@@ -47,7 +47,7 @@ int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
u32 src_ctxdma = nouveau_bo_mem_ctxdma(bo, chan, old_reg);
u32 src_offset = old_reg->start << PAGE_SHIFT;
u32 dst_ctxdma = nouveau_bo_mem_ctxdma(bo, chan, new_reg);
@@ -96,7 +96,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret;
ret = PUSH_WAIT(push, 4);
@@ -104,6 +104,6 @@ nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
return ret;
PUSH_MTHD(push, NV039, SET_OBJECT, handle);
- PUSH_MTHD(push, NV039, SET_CONTEXT_DMA_NOTIFIES, chan->drm->ntfy.handle);
+ PUSH_MTHD(push, NV039, SET_CONTEXT_DMA_NOTIFIES, chan->cli->drm->ntfy.handle);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo5039.c b/drivers/gpu/drm/nouveau/nouveau_bo5039.c
index c6cf3629a9f9..c3de17548d97 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo5039.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo5039.c
@@ -40,7 +40,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
u64 length = new_reg->size;
u64 src_offset = mem->vma[0].addr;
u64 dst_offset = mem->vma[1].addr;
@@ -136,7 +136,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret;
ret = PUSH_WAIT(push, 6);
@@ -144,7 +144,7 @@ nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
return ret;
PUSH_MTHD(push, NV5039, SET_OBJECT, handle);
- PUSH_MTHD(push, NV5039, SET_CONTEXT_DMA_NOTIFY, chan->drm->ntfy.handle,
+ PUSH_MTHD(push, NV5039, SET_CONTEXT_DMA_NOTIFY, chan->cli->drm->ntfy.handle,
SET_CONTEXT_DMA_BUFFER_IN, chan->vram.handle,
SET_CONTEXT_DMA_BUFFER_OUT, chan->vram.handle);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo74c1.c b/drivers/gpu/drm/nouveau/nouveau_bo74c1.c
index 9b7ba31fae13..e6ef79de2498 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo74c1.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo74c1.c
@@ -37,7 +37,7 @@ nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret;
ret = PUSH_WAIT(push, 7);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo85b5.c b/drivers/gpu/drm/nouveau/nouveau_bo85b5.c
index a15a38a87a95..c4861d073ad4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo85b5.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo85b5.c
@@ -41,7 +41,7 @@ nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
u64 src_offset = mem->vma[0].addr;
u64 dst_offset = mem->vma[1].addr;
u32 page_count = PFN_UP(new_reg->size);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo9039.c b/drivers/gpu/drm/nouveau/nouveau_bo9039.c
index d2bb2687d401..ad82269c7725 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo9039.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo9039.c
@@ -38,7 +38,7 @@ int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
struct nouveau_mem *mem = nouveau_mem(old_reg);
u64 src_offset = mem->vma[0].addr;
u64 dst_offset = mem->vma[1].addr;
@@ -86,7 +86,7 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret;
ret = PUSH_WAIT(push, 2);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo90b5.c b/drivers/gpu/drm/nouveau/nouveau_bo90b5.c
index 4618f4f5ab56..5eaeef9d25e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo90b5.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo90b5.c
@@ -34,7 +34,7 @@ nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
u64 src_offset = mem->vma[0].addr;
u64 dst_offset = mem->vma[1].addr;
u32 page_count = PFN_UP(new_reg->size);
diff --git a/drivers/gpu/drm/nouveau/nouveau_boa0b5.c b/drivers/gpu/drm/nouveau/nouveau_boa0b5.c
index 07a5c6302c98..dff2ae0e1e45 100644
--- a/drivers/gpu/drm/nouveau/nouveau_boa0b5.c
+++ b/drivers/gpu/drm/nouveau/nouveau_boa0b5.c
@@ -39,7 +39,7 @@ nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret;
ret = PUSH_WAIT(push, 10);
@@ -78,7 +78,7 @@ nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret;
ret = PUSH_WAIT(push, 2);
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 66fca95c10c7..2cb2e5675807 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -52,7 +52,7 @@ static int
nouveau_channel_killed(struct nvif_event *event, void *repv, u32 repc)
{
struct nouveau_channel *chan = container_of(event, typeof(*chan), kill);
- struct nouveau_cli *cli = (void *)chan->user.client;
+ struct nouveau_cli *cli = chan->cli;
NV_PRINTK(warn, cli, "channel %d killed!\n", chan->chid);
@@ -66,19 +66,19 @@ int
nouveau_channel_idle(struct nouveau_channel *chan)
{
if (likely(chan && chan->fence && !atomic_read(&chan->killed))) {
- struct nouveau_cli *cli = (void *)chan->user.client;
+ struct nouveau_cli *cli = chan->cli;
struct nouveau_fence *fence = NULL;
int ret;
ret = nouveau_fence_new(&fence, chan);
if (!ret) {
- ret = nouveau_fence_wait(fence, false);
+ ret = nouveau_fence_wait(fence, false, false);
nouveau_fence_unref(&fence);
}
if (ret) {
NV_PRINTK(err, cli, "failed to idle channel %d [%s]\n",
- chan->chid, nvxx_client(&cli->base)->name);
+ chan->chid, cli->name);
return ret;
}
}
@@ -90,12 +90,10 @@ nouveau_channel_del(struct nouveau_channel **pchan)
{
struct nouveau_channel *chan = *pchan;
if (chan) {
- struct nouveau_cli *cli = (void *)chan->user.client;
-
if (chan->fence)
- nouveau_fence(chan->drm)->context_del(chan);
+ nouveau_fence(chan->cli->drm)->context_del(chan);
- if (cli)
+ if (nvif_object_constructed(&chan->user))
nouveau_svmm_part(chan->vmm->svmm, chan->inst);
nvif_object_dtor(&chan->blit);
@@ -110,7 +108,7 @@ nouveau_channel_del(struct nouveau_channel **pchan)
nouveau_bo_unmap(chan->push.buffer);
if (chan->push.buffer && chan->push.buffer->bo.pin_count)
nouveau_bo_unpin(chan->push.buffer);
- nouveau_bo_ref(NULL, &chan->push.buffer);
+ nouveau_bo_fini(chan->push.buffer);
kfree(chan);
}
*pchan = NULL;
@@ -119,33 +117,34 @@ nouveau_channel_del(struct nouveau_channel **pchan)
static void
nouveau_channel_kick(struct nvif_push *push)
{
- struct nouveau_channel *chan = container_of(push, typeof(*chan), chan._push);
- chan->dma.cur = chan->dma.cur + (chan->chan._push.cur - chan->chan._push.bgn);
+ struct nouveau_channel *chan = container_of(push, typeof(*chan), chan.push);
+ chan->dma.cur = chan->dma.cur + (chan->chan.push.cur - chan->chan.push.bgn);
FIRE_RING(chan);
- chan->chan._push.bgn = chan->chan._push.cur;
+ chan->chan.push.bgn = chan->chan.push.cur;
}
static int
nouveau_channel_wait(struct nvif_push *push, u32 size)
{
- struct nouveau_channel *chan = container_of(push, typeof(*chan), chan._push);
+ struct nouveau_channel *chan = container_of(push, typeof(*chan), chan.push);
int ret;
- chan->dma.cur = chan->dma.cur + (chan->chan._push.cur - chan->chan._push.bgn);
+ chan->dma.cur = chan->dma.cur + (chan->chan.push.cur - chan->chan.push.bgn);
ret = RING_SPACE(chan, size);
if (ret == 0) {
- chan->chan._push.bgn = chan->chan._push.mem.object.map.ptr;
- chan->chan._push.bgn = chan->chan._push.bgn + chan->dma.cur;
- chan->chan._push.cur = chan->chan._push.bgn;
- chan->chan._push.end = chan->chan._push.bgn + size;
+ chan->chan.push.bgn = chan->chan.push.mem.object.map.ptr;
+ chan->chan.push.bgn = chan->chan.push.bgn + chan->dma.cur;
+ chan->chan.push.cur = chan->chan.push.bgn;
+ chan->chan.push.end = chan->chan.push.bgn + size;
}
return ret;
}
static int
-nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
+nouveau_channel_prep(struct nouveau_cli *cli,
u32 size, struct nouveau_channel **pchan)
{
- struct nouveau_cli *cli = (void *)device->object.client;
+ struct nouveau_drm *drm = cli->drm;
+ struct nvif_device *device = &cli->device;
struct nv_dma_v0 args = {};
struct nouveau_channel *chan;
u32 target;
@@ -155,8 +154,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
if (!chan)
return -ENOMEM;
- chan->device = device;
- chan->drm = drm;
+ chan->cli = cli;
chan->vmm = nouveau_cli_vmm(cli);
atomic_set(&chan->killed, 0);
@@ -178,13 +176,12 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
return ret;
}
- chan->chan._push.mem.object.parent = cli->base.object.parent;
- chan->chan._push.mem.object.client = &cli->base;
- chan->chan._push.mem.object.name = "chanPush";
- chan->chan._push.mem.object.map.ptr = chan->push.buffer->kmap.virtual;
- chan->chan._push.wait = nouveau_channel_wait;
- chan->chan._push.kick = nouveau_channel_kick;
- chan->chan.push = &chan->chan._push;
+ chan->chan.push.mem.object.parent = cli->base.object.parent;
+ chan->chan.push.mem.object.client = &cli->base;
+ chan->chan.push.mem.object.name = "chanPush";
+ chan->chan.push.mem.object.map.ptr = chan->push.buffer->kmap.virtual;
+ chan->chan.push.wait = nouveau_channel_wait;
+ chan->chan.push.kick = nouveau_channel_kick;
/* create dma object covering the *entire* memory space that the
* pushbuf lives in, this is because the GEM code requires that
@@ -218,8 +215,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
*/
args.target = NV_DMA_V0_TARGET_PCI;
args.access = NV_DMA_V0_ACCESS_RDWR;
- args.start = nvxx_device(device)->func->
- resource_addr(nvxx_device(device), 1);
+ args.start = nvxx_device(drm)->func->resource_addr(nvxx_device(drm), 1);
args.limit = args.start + device->info.ram_user - 1;
} else {
args.target = NV_DMA_V0_TARGET_VRAM;
@@ -228,12 +224,11 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
args.limit = device->info.ram_user - 1;
}
} else {
- if (chan->drm->agp.bridge) {
+ if (drm->agp.bridge) {
args.target = NV_DMA_V0_TARGET_AGP;
args.access = NV_DMA_V0_ACCESS_RDWR;
- args.start = chan->drm->agp.base;
- args.limit = chan->drm->agp.base +
- chan->drm->agp.size - 1;
+ args.start = drm->agp.base;
+ args.limit = drm->agp.base + drm->agp.size - 1;
} else {
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_RDWR;
@@ -254,7 +249,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
}
static int
-nouveau_channel_ctor(struct nouveau_drm *drm, struct nvif_device *device, bool priv, u64 runm,
+nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
struct nouveau_channel **pchan)
{
const struct nvif_mclass hosts[] = {
@@ -279,7 +274,7 @@ nouveau_channel_ctor(struct nouveau_drm *drm, struct nvif_device *device, bool p
struct nvif_chan_v0 chan;
char name[TASK_COMM_LEN+16];
} args;
- struct nouveau_cli *cli = (void *)device->object.client;
+ struct nvif_device *device = &cli->device;
struct nouveau_channel *chan;
const u64 plength = 0x10000;
const u64 ioffset = plength;
@@ -298,7 +293,7 @@ nouveau_channel_ctor(struct nouveau_drm *drm, struct nvif_device *device, bool p
size = ioffset + ilength;
/* allocate dma push buffer */
- ret = nouveau_channel_prep(drm, device, size, &chan);
+ ret = nouveau_channel_prep(cli, size, &chan);
*pchan = chan;
if (ret)
return ret;
@@ -363,8 +358,9 @@ nouveau_channel_ctor(struct nouveau_drm *drm, struct nvif_device *device, bool p
static int
nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
{
- struct nvif_device *device = chan->device;
- struct nouveau_drm *drm = chan->drm;
+ struct nouveau_cli *cli = chan->cli;
+ struct nouveau_drm *drm = cli->drm;
+ struct nvif_device *device = &cli->device;
struct nv_dma_v0 args = {};
int ret, i;
@@ -419,12 +415,11 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
args.start = 0;
args.limit = chan->vmm->vmm.limit - 1;
} else
- if (chan->drm->agp.bridge) {
+ if (drm->agp.bridge) {
args.target = NV_DMA_V0_TARGET_AGP;
args.access = NV_DMA_V0_ACCESS_RDWR;
- args.start = chan->drm->agp.base;
- args.limit = chan->drm->agp.base +
- chan->drm->agp.size - 1;
+ args.start = drm->agp.base;
+ args.limit = drm->agp.base + drm->agp.size - 1;
} else {
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_RDWR;
@@ -465,12 +460,12 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
chan->dma.cur = chan->dma.put;
chan->dma.free = chan->dma.max - chan->dma.cur;
- ret = PUSH_WAIT(chan->chan.push, NOUVEAU_DMA_SKIPS);
+ ret = PUSH_WAIT(&chan->chan.push, NOUVEAU_DMA_SKIPS);
if (ret)
return ret;
for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
- PUSH_DATA(chan->chan.push, 0x00000000);
+ PUSH_DATA(&chan->chan.push, 0x00000000);
/* allocate software object class (used for fences on <= nv05) */
if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
@@ -480,26 +475,25 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
if (ret)
return ret;
- ret = PUSH_WAIT(chan->chan.push, 2);
+ ret = PUSH_WAIT(&chan->chan.push, 2);
if (ret)
return ret;
- PUSH_NVSQ(chan->chan.push, NV_SW, 0x0000, chan->nvsw.handle);
- PUSH_KICK(chan->chan.push);
+ PUSH_NVSQ(&chan->chan.push, NV_SW, 0x0000, chan->nvsw.handle);
+ PUSH_KICK(&chan->chan.push);
}
/* initialise synchronisation */
- return nouveau_fence(chan->drm)->context_new(chan);
+ return nouveau_fence(drm)->context_new(chan);
}
int
-nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
+nouveau_channel_new(struct nouveau_cli *cli,
bool priv, u64 runm, u32 vram, u32 gart, struct nouveau_channel **pchan)
{
- struct nouveau_cli *cli = (void *)device->object.client;
int ret;
- ret = nouveau_channel_ctor(drm, device, priv, runm, pchan);
+ ret = nouveau_channel_ctor(cli, priv, runm, pchan);
if (ret) {
NV_PRINTK(dbg, cli, "channel create, %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 5de2ef4e98c2..016f668c0bc1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -8,12 +8,10 @@ struct nvif_device;
struct nouveau_channel {
struct {
- struct nvif_push _push;
- struct nvif_push *push;
+ struct nvif_push push;
} chan;
- struct nvif_device *device;
- struct nouveau_drm *drm;
+ struct nouveau_cli *cli;
struct nouveau_vmm *vmm;
struct nvif_mem mem_userd;
@@ -62,7 +60,7 @@ struct nouveau_channel {
int nouveau_channels_init(struct nouveau_drm *);
void nouveau_channels_fini(struct nouveau_drm *);
-int nouveau_channel_new(struct nouveau_drm *, struct nvif_device *, bool priv, u64 runm,
+int nouveau_channel_new(struct nouveau_cli *, bool priv, u64 runm,
u32 vram, u32 gart, struct nouveau_channel **);
void nouveau_channel_del(struct nouveau_channel **);
int nouveau_channel_idle(struct nouveau_channel *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index d4725a968827..8a87e9697a42 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -446,10 +446,8 @@ static struct nouveau_drm_prop_enum_list dither_depth[] = {
} while(0)
void
-nouveau_display_hpd_resume(struct drm_device *dev)
+nouveau_display_hpd_resume(struct nouveau_drm *drm)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
-
if (drm->headless)
return;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 2ab2ddb1eadf..1f506f8b289c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -45,7 +45,7 @@ nouveau_display(struct drm_device *dev)
int nouveau_display_create(struct drm_device *dev);
void nouveau_display_destroy(struct drm_device *dev);
int nouveau_display_init(struct drm_device *dev, bool resume, bool runtime);
-void nouveau_display_hpd_resume(struct drm_device *dev);
+void nouveau_display_hpd_resume(struct nouveau_drm *);
void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime);
int nouveau_display_suspend(struct drm_device *dev, bool runtime);
void nouveau_display_resume(struct drm_device *dev, bool runtime);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index b01c029f3a90..a1f329ef0641 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -72,7 +72,7 @@ void
nv50_dma_push(struct nouveau_channel *chan, u64 offset, u32 length,
bool no_prefetch)
{
- struct nvif_user *user = &chan->drm->client.device.user;
+ struct nvif_user *user = &chan->cli->drm->client.device.user;
struct nouveau_bo *pb = chan->push.buffer;
int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 6719353e2e13..1f2d649f4b96 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -128,7 +128,7 @@ static void nouveau_dmem_page_free(struct page *page)
static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
{
if (fence) {
- nouveau_fence_wait(*fence, false);
+ nouveau_fence_wait(*fence, true, false);
nouveau_fence_unref(fence);
} else {
/*
@@ -294,7 +294,7 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
out_bo_unpin:
nouveau_bo_unpin(chunk->bo);
out_bo_free:
- nouveau_bo_ref(NULL, &chunk->bo);
+ nouveau_bo_fini(chunk->bo);
out_release:
release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range));
out_free:
@@ -426,7 +426,7 @@ nouveau_dmem_fini(struct nouveau_drm *drm)
list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
nouveau_dmem_evict_chunk(chunk);
nouveau_bo_unpin(chunk->bo);
- nouveau_bo_ref(NULL, &chunk->bo);
+ nouveau_bo_fini(chunk->bo);
WARN_ON(chunk->callocated);
list_del(&chunk->list);
memunmap_pages(&chunk->pagemap);
@@ -443,7 +443,7 @@ nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
enum nouveau_aper dst_aper, u64 dst_addr,
enum nouveau_aper src_aper, u64 src_addr)
{
- struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
+ struct nvif_push *push = &drm->dmem->migrate.chan->chan.push;
u32 launch_dma = 0;
int ret;
@@ -516,7 +516,7 @@ static int
nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
enum nouveau_aper dst_aper, u64 dst_addr)
{
- struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
+ struct nvif_push *push = &drm->dmem->migrate.chan->chan.push;
u32 launch_dma = 0;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index a58c31089613..ac7c60fb14d3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -63,7 +63,6 @@
#include "nouveau_abi16.h"
#include "nouveau_fence.h"
#include "nouveau_debugfs.h"
-#include "nouveau_usif.h"
#include "nouveau_connector.h"
#include "nouveau_platform.h"
#include "nouveau_svm.h"
@@ -200,7 +199,6 @@ nouveau_cli_fini(struct nouveau_cli *cli)
flush_work(&cli->work);
WARN_ON(!list_empty(&cli->worker));
- usif_client_fini(cli);
if (cli->sched)
nouveau_sched_destroy(&cli->sched);
if (uvmm)
@@ -208,10 +206,11 @@ nouveau_cli_fini(struct nouveau_cli *cli)
nouveau_vmm_fini(&cli->svm);
nouveau_vmm_fini(&cli->vmm);
nvif_mmu_dtor(&cli->mmu);
+ cli->device.object.map.ptr = NULL;
nvif_device_dtor(&cli->device);
- mutex_lock(&cli->drm->master.lock);
+ mutex_lock(&cli->drm->client_mutex);
nvif_client_dtor(&cli->base);
- mutex_unlock(&cli->drm->master.lock);
+ mutex_unlock(&cli->drm->client_mutex);
}
static int
@@ -226,13 +225,6 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
{}
};
static const struct nvif_mclass
- mmus[] = {
- { NVIF_CLASS_MMU_GF100, -1 },
- { NVIF_CLASS_MMU_NV50 , -1 },
- { NVIF_CLASS_MMU_NV04 , -1 },
- {}
- };
- static const struct nvif_mclass
vmms[] = {
{ NVIF_CLASS_VMM_GP100, -1 },
{ NVIF_CLASS_VMM_GM200, -1 },
@@ -241,50 +233,33 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
{ NVIF_CLASS_VMM_NV04 , -1 },
{}
};
- u64 device = nouveau_name(drm->dev);
int ret;
snprintf(cli->name, sizeof(cli->name), "%s", sname);
cli->drm = drm;
mutex_init(&cli->mutex);
- usif_client_init(cli);
INIT_WORK(&cli->work, nouveau_cli_work);
INIT_LIST_HEAD(&cli->worker);
mutex_init(&cli->lock);
- if (cli == &drm->master) {
- ret = nvif_driver_init(NULL, nouveau_config, nouveau_debug,
- cli->name, device, &cli->base);
- } else {
- mutex_lock(&drm->master.lock);
- ret = nvif_client_ctor(&drm->master.base, cli->name, device,
- &cli->base);
- mutex_unlock(&drm->master.lock);
- }
+ mutex_lock(&drm->client_mutex);
+ ret = nvif_client_ctor(&drm->_client, cli->name, &cli->base);
+ mutex_unlock(&drm->client_mutex);
if (ret) {
NV_PRINTK(err, cli, "Client allocation failed: %d\n", ret);
goto done;
}
- ret = nvif_device_ctor(&cli->base.object, "drmDevice", 0, NV_DEVICE,
- &(struct nv_device_v0) {
- .device = ~0,
- .priv = true,
- }, sizeof(struct nv_device_v0),
- &cli->device);
+ ret = nvif_device_ctor(&cli->base, "drmDevice", &cli->device);
if (ret) {
NV_PRINTK(err, cli, "Device allocation failed: %d\n", ret);
goto done;
}
- ret = nvif_mclass(&cli->device.object, mmus);
- if (ret < 0) {
- NV_PRINTK(err, cli, "No supported MMU class\n");
- goto done;
- }
+ cli->device.object.map.ptr = drm->device.object.map.ptr;
- ret = nvif_mmu_ctor(&cli->device.object, "drmMmu", mmus[ret].oclass,
+ ret = nvif_mmu_ctor(&cli->device.object, "drmMmu", drm->mmu.object.oclass,
&cli->mmu);
if (ret) {
NV_PRINTK(err, cli, "MMU allocation failed: %d\n", ret);
@@ -356,7 +331,7 @@ nouveau_accel_ce_init(struct nouveau_drm *drm)
return;
}
- ret = nouveau_channel_new(drm, device, false, runm, NvDmaFB, NvDmaTT, &drm->cechan);
+ ret = nouveau_channel_new(&drm->client, false, runm, NvDmaFB, NvDmaTT, &drm->cechan);
if (ret)
NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
}
@@ -384,7 +359,7 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
return;
}
- ret = nouveau_channel_new(drm, device, false, runm, NvDmaFB, NvDmaTT, &drm->channel);
+ ret = nouveau_channel_new(&drm->client, false, runm, NvDmaFB, NvDmaTT, &drm->channel);
if (ret) {
NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
nouveau_accel_gr_fini(drm);
@@ -407,7 +382,8 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
}
if (ret == 0) {
- struct nvif_push *push = drm->channel->chan.push;
+ struct nvif_push *push = &drm->channel->chan.push;
+
ret = PUSH_WAIT(push, 8);
if (ret == 0) {
if (device->info.chipset >= 0x11) {
@@ -432,8 +408,7 @@ nouveau_accel_gr_init(struct nouveau_drm *drm)
* any GPU where it's possible we'll end up using M2MF for BO moves.
*/
if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
- ret = nvkm_gpuobj_new(nvxx_device(device), 32, 0, false, NULL,
- &drm->notify);
+ ret = nvkm_gpuobj_new(nvxx_device(drm), 32, 0, false, NULL, &drm->notify);
if (ret) {
NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
nouveau_accel_gr_fini(drm);
@@ -578,37 +553,70 @@ nouveau_parent = {
.errorf = nouveau_drm_errorf,
};
-static int
-nouveau_drm_device_init(struct drm_device *dev)
+static void
+nouveau_drm_device_fini(struct nouveau_drm *drm)
{
- struct nouveau_drm *drm;
- int ret;
+ struct drm_device *dev = drm->dev;
+ struct nouveau_cli *cli, *temp_cli;
- if (!(drm = kzalloc(sizeof(*drm), GFP_KERNEL)))
- return -ENOMEM;
- dev->dev_private = drm;
- drm->dev = dev;
+ if (nouveau_pmops_runtime()) {
+ pm_runtime_get_sync(dev->dev);
+ pm_runtime_forbid(dev->dev);
+ }
- nvif_parent_ctor(&nouveau_parent, &drm->parent);
- drm->master.base.object.parent = &drm->parent;
+ nouveau_led_fini(dev);
+ nouveau_dmem_fini(drm);
+ nouveau_svm_fini(drm);
+ nouveau_hwmon_fini(dev);
+ nouveau_debugfs_fini(drm);
- drm->sched_wq = alloc_workqueue("nouveau_sched_wq_shared", 0,
- WQ_MAX_ACTIVE);
- if (!drm->sched_wq) {
- ret = -ENOMEM;
- goto fail_alloc;
+ if (dev->mode_config.num_crtc)
+ nouveau_display_fini(dev, false, false);
+ nouveau_display_destroy(dev);
+
+ nouveau_accel_fini(drm);
+ nouveau_bios_takedown(dev);
+
+ nouveau_ttm_fini(drm);
+ nouveau_vga_fini(drm);
+
+ /*
+ * There may be existing clients from as-yet unclosed files. For now,
+ * clean them up here rather than deferring until the file is closed,
+ * but this is likely not correct if we want to support hot-unplugging
+ * properly.
+ */
+ mutex_lock(&drm->clients_lock);
+ list_for_each_entry_safe(cli, temp_cli, &drm->clients, head) {
+ list_del(&cli->head);
+ mutex_lock(&cli->mutex);
+ if (cli->abi16)
+ nouveau_abi16_fini(cli->abi16);
+ mutex_unlock(&cli->mutex);
+ nouveau_cli_fini(cli);
+ kfree(cli);
}
+ mutex_unlock(&drm->clients_lock);
- ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
- if (ret)
- goto fail_wq;
+ nouveau_cli_fini(&drm->client);
+ destroy_workqueue(drm->sched_wq);
+ mutex_destroy(&drm->clients_lock);
+}
+
+static int
+nouveau_drm_device_init(struct nouveau_drm *drm)
+{
+ struct drm_device *dev = drm->dev;
+ int ret;
+
+ drm->sched_wq = alloc_workqueue("nouveau_sched_wq_shared", 0,
+ WQ_MAX_ACTIVE);
+ if (!drm->sched_wq)
+ return -ENOMEM;
ret = nouveau_cli_init(drm, "DRM", &drm->client);
if (ret)
- goto fail_master;
-
- nvxx_client(&drm->client.base)->debug =
- nvkm_dbgopt(nouveau_debug, "DRM");
+ goto fail_wq;
INIT_LIST_HEAD(&drm->clients);
mutex_init(&drm->clients_lock);
@@ -658,6 +666,12 @@ nouveau_drm_device_init(struct drm_device *dev)
pm_runtime_put(dev->dev);
}
+ ret = drm_dev_register(drm->dev, 0);
+ if (ret) {
+ nouveau_drm_device_fini(drm);
+ return ret;
+ }
+
return 0;
fail_dispinit:
nouveau_display_destroy(dev);
@@ -669,67 +683,95 @@ fail_bios:
fail_ttm:
nouveau_vga_fini(drm);
nouveau_cli_fini(&drm->client);
-fail_master:
- nouveau_cli_fini(&drm->master);
fail_wq:
destroy_workqueue(drm->sched_wq);
-fail_alloc:
- nvif_parent_dtor(&drm->parent);
- kfree(drm);
return ret;
}
static void
-nouveau_drm_device_fini(struct drm_device *dev)
+nouveau_drm_device_del(struct nouveau_drm *drm)
{
- struct nouveau_cli *cli, *temp_cli;
- struct nouveau_drm *drm = nouveau_drm(dev);
+ if (drm->dev)
+ drm_dev_put(drm->dev);
- if (nouveau_pmops_runtime()) {
- pm_runtime_get_sync(dev->dev);
- pm_runtime_forbid(dev->dev);
+ nvif_mmu_dtor(&drm->mmu);
+ nvif_device_dtor(&drm->device);
+ nvif_client_dtor(&drm->_client);
+ nvif_parent_dtor(&drm->parent);
+
+ mutex_destroy(&drm->client_mutex);
+ kfree(drm);
+}
+
+static struct nouveau_drm *
+nouveau_drm_device_new(const struct drm_driver *drm_driver, struct device *parent,
+ struct nvkm_device *device)
+{
+ static const struct nvif_mclass
+ mmus[] = {
+ { NVIF_CLASS_MMU_GF100, -1 },
+ { NVIF_CLASS_MMU_NV50 , -1 },
+ { NVIF_CLASS_MMU_NV04 , -1 },
+ {}
+ };
+ struct nouveau_drm *drm;
+ int ret;
+
+ drm = kzalloc(sizeof(*drm), GFP_KERNEL);
+ if (!drm)
+ return ERR_PTR(-ENOMEM);
+
+ drm->nvkm = device;
+
+ drm->dev = drm_dev_alloc(drm_driver, parent);
+ if (IS_ERR(drm->dev)) {
+ ret = PTR_ERR(drm->dev);
+ goto done;
}
- nouveau_led_fini(dev);
- nouveau_dmem_fini(drm);
- nouveau_svm_fini(drm);
- nouveau_hwmon_fini(dev);
- nouveau_debugfs_fini(drm);
+ drm->dev->dev_private = drm;
+ dev_set_drvdata(parent, drm);
- if (dev->mode_config.num_crtc)
- nouveau_display_fini(dev, false, false);
- nouveau_display_destroy(dev);
+ nvif_parent_ctor(&nouveau_parent, &drm->parent);
+ mutex_init(&drm->client_mutex);
+ drm->_client.object.parent = &drm->parent;
- nouveau_accel_fini(drm);
- nouveau_bios_takedown(dev);
+ ret = nvif_driver_init(NULL, nouveau_config, nouveau_debug, "drm",
+ nouveau_name(drm->dev), &drm->_client);
+ if (ret)
+ goto done;
- nouveau_ttm_fini(drm);
- nouveau_vga_fini(drm);
+ ret = nvif_device_ctor(&drm->_client, "drmDevice", &drm->device);
+ if (ret) {
+ NV_ERROR(drm, "Device allocation failed: %d\n", ret);
+ goto done;
+ }
- /*
- * There may be existing clients from as-yet unclosed files. For now,
- * clean them up here rather than deferring until the file is closed,
- * but this likely not correct if we want to support hot-unplugging
- * properly.
- */
- mutex_lock(&drm->clients_lock);
- list_for_each_entry_safe(cli, temp_cli, &drm->clients, head) {
- list_del(&cli->head);
- mutex_lock(&cli->mutex);
- if (cli->abi16)
- nouveau_abi16_fini(cli->abi16);
- mutex_unlock(&cli->mutex);
- nouveau_cli_fini(cli);
- kfree(cli);
+ ret = nvif_device_map(&drm->device);
+ if (ret) {
+ NV_ERROR(drm, "Failed to map PRI: %d\n", ret);
+ goto done;
}
- mutex_unlock(&drm->clients_lock);
- nouveau_cli_fini(&drm->client);
- nouveau_cli_fini(&drm->master);
- destroy_workqueue(drm->sched_wq);
- nvif_parent_dtor(&drm->parent);
- mutex_destroy(&drm->clients_lock);
- kfree(drm);
+ ret = nvif_mclass(&drm->device.object, mmus);
+ if (ret < 0) {
+ NV_ERROR(drm, "No supported MMU class\n");
+ goto done;
+ }
+
+ ret = nvif_mmu_ctor(&drm->device.object, "drmMmu", mmus[ret].oclass, &drm->mmu);
+ if (ret) {
+ NV_ERROR(drm, "MMU allocation failed: %d\n", ret);
+ goto done;
+ }
+
+done:
+ if (ret) {
+ nouveau_drm_device_del(drm);
+ drm = NULL;
+ }
+
+ return ret ? ERR_PTR(ret) : drm;
}
/*
@@ -774,8 +816,7 @@ nouveau_drm_device_fini(struct drm_device *dev)
static void quirk_broken_nv_runpm(struct pci_dev *pdev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
- struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
struct pci_dev *bridge = pci_upstream_bridge(pdev);
if (!bridge || bridge->vendor != PCI_VENDOR_ID_INTEL)
@@ -794,7 +835,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
const struct pci_device_id *pent)
{
struct nvkm_device *device;
- struct drm_device *drm_dev;
+ struct nouveau_drm *drm;
int ret;
if (vga_switcheroo_client_probe_defer(pdev))
@@ -803,31 +844,23 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
/* We need to check that the chipset is supported before booting
* fbdev off the hardware, as there's no way to put it back.
*/
- ret = nvkm_device_pci_new(pdev, nouveau_config, "error",
- true, false, 0, &device);
+ ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug, &device);
if (ret)
return ret;
- nvkm_device_del(&device);
-
/* Remove conflicting drivers (vesafb, efifb etc). */
ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver_pci);
if (ret)
return ret;
- ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
- true, true, ~0ULL, &device);
- if (ret)
- return ret;
-
pci_set_master(pdev);
if (nouveau_atomic)
driver_pci.driver_features |= DRIVER_ATOMIC;
- drm_dev = drm_dev_alloc(&driver_pci, &pdev->dev);
- if (IS_ERR(drm_dev)) {
- ret = PTR_ERR(drm_dev);
+ drm = nouveau_drm_device_new(&driver_pci, &pdev->dev, device);
+ if (IS_ERR(drm)) {
+ ret = PTR_ERR(drm);
goto fail_nvkm;
}
@@ -835,69 +868,55 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
if (ret)
goto fail_drm;
- pci_set_drvdata(pdev, drm_dev);
-
- ret = nouveau_drm_device_init(drm_dev);
+ ret = nouveau_drm_device_init(drm);
if (ret)
goto fail_pci;
- ret = drm_dev_register(drm_dev, pent->driver_data);
- if (ret)
- goto fail_drm_dev_init;
-
- if (nouveau_drm(drm_dev)->client.device.info.ram_size <= 32 * 1024 * 1024)
- drm_fbdev_ttm_setup(drm_dev, 8);
+ if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
+ drm_fbdev_ttm_setup(drm->dev, 8);
else
- drm_fbdev_ttm_setup(drm_dev, 32);
+ drm_fbdev_ttm_setup(drm->dev, 32);
quirk_broken_nv_runpm(pdev);
return 0;
-fail_drm_dev_init:
- nouveau_drm_device_fini(drm_dev);
fail_pci:
pci_disable_device(pdev);
fail_drm:
- drm_dev_put(drm_dev);
+ nouveau_drm_device_del(drm);
fail_nvkm:
nvkm_device_del(&device);
return ret;
}
void
-nouveau_drm_device_remove(struct drm_device *dev)
+nouveau_drm_device_remove(struct nouveau_drm *drm)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_client *client;
- struct nvkm_device *device;
+ struct nvkm_device *device = drm->nvkm;
- drm_dev_unplug(dev);
+ drm_dev_unplug(drm->dev);
- client = nvxx_client(&drm->client.base);
- device = nvkm_device_find(client->device);
-
- nouveau_drm_device_fini(dev);
- drm_dev_put(dev);
+ nouveau_drm_device_fini(drm);
+ nouveau_drm_device_del(drm);
nvkm_device_del(&device);
}
static void
nouveau_drm_remove(struct pci_dev *pdev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
- struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
/* revert our workaround */
if (drm->old_pm_cap)
pdev->pm_cap = drm->old_pm_cap;
- nouveau_drm_device_remove(dev);
+ nouveau_drm_device_remove(drm);
pci_disable_device(pdev);
}
static int
-nouveau_do_suspend(struct drm_device *dev, bool runtime)
+nouveau_do_suspend(struct nouveau_drm *drm, bool runtime)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
+ struct drm_device *dev = drm->dev;
struct ttm_resource_manager *man;
int ret;
@@ -939,7 +958,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
}
NV_DEBUG(drm, "suspending object tree...\n");
- ret = nvif_client_suspend(&drm->master.base);
+ ret = nvif_client_suspend(&drm->_client);
if (ret)
goto fail_client;
@@ -958,13 +977,13 @@ fail_display:
}
static int
-nouveau_do_resume(struct drm_device *dev, bool runtime)
+nouveau_do_resume(struct nouveau_drm *drm, bool runtime)
{
+ struct drm_device *dev = drm->dev;
int ret = 0;
- struct nouveau_drm *drm = nouveau_drm(dev);
NV_DEBUG(drm, "resuming object tree...\n");
- ret = nvif_client_resume(&drm->master.base);
+ ret = nvif_client_resume(&drm->_client);
if (ret) {
NV_ERROR(drm, "Client resume failed with error: %d\n", ret);
return ret;
@@ -991,14 +1010,14 @@ int
nouveau_pmops_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
int ret;
- if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
- drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+ if (drm->dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+ drm->dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
return 0;
- ret = nouveau_do_suspend(drm_dev, false);
+ ret = nouveau_do_suspend(drm, false);
if (ret)
return ret;
@@ -1013,11 +1032,11 @@ int
nouveau_pmops_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
int ret;
- if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
- drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+ if (drm->dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+ drm->dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
return 0;
pci_set_power_state(pdev, PCI_D0);
@@ -1027,10 +1046,10 @@ nouveau_pmops_resume(struct device *dev)
return ret;
pci_set_master(pdev);
- ret = nouveau_do_resume(drm_dev, false);
+ ret = nouveau_do_resume(drm, false);
/* Monitors may have been connected / disconnected during suspend */
- nouveau_display_hpd_resume(drm_dev);
+ nouveau_display_hpd_resume(drm);
return ret;
}
@@ -1038,17 +1057,17 @@ nouveau_pmops_resume(struct device *dev)
static int
nouveau_pmops_freeze(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return nouveau_do_suspend(drm_dev, false);
+ struct nouveau_drm *drm = dev_get_drvdata(dev);
+
+ return nouveau_do_suspend(drm, false);
}
static int
nouveau_pmops_thaw(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return nouveau_do_resume(drm_dev, false);
+ struct nouveau_drm *drm = dev_get_drvdata(dev);
+
+ return nouveau_do_resume(drm, false);
}
bool
@@ -1063,7 +1082,7 @@ static int
nouveau_pmops_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
int ret;
if (!nouveau_pmops_runtime()) {
@@ -1072,12 +1091,12 @@ nouveau_pmops_runtime_suspend(struct device *dev)
}
nouveau_switcheroo_optimus_dsm();
- ret = nouveau_do_suspend(drm_dev, true);
+ ret = nouveau_do_suspend(drm, true);
pci_save_state(pdev);
pci_disable_device(pdev);
pci_ignore_hotplug(pdev);
pci_set_power_state(pdev, PCI_D3cold);
- drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+ drm->dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
return ret;
}
@@ -1085,9 +1104,8 @@ static int
nouveau_pmops_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvif_device *device = &nouveau_drm(drm_dev)->client.device;
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
+ struct nvif_device *device = &drm->client.device;
int ret;
if (!nouveau_pmops_runtime()) {
@@ -1102,7 +1120,7 @@ nouveau_pmops_runtime_resume(struct device *dev)
return ret;
pci_set_master(pdev);
- ret = nouveau_do_resume(drm_dev, true);
+ ret = nouveau_do_resume(drm, true);
if (ret) {
NV_ERROR(drm, "resume failed with: %d\n", ret);
return ret;
@@ -1110,10 +1128,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
/* do magic */
nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
- drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ drm->dev->switch_power_state = DRM_SWITCH_POWER_ON;
/* Monitors may have been connected / disconnected during suspend */
- nouveau_display_hpd_resume(drm_dev);
+ nouveau_display_hpd_resume(drm);
return ret;
}
@@ -1249,7 +1267,7 @@ nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) {
case DRM_NOUVEAU_NVIF:
- ret = usif_ioctl(filp, (void __user *)arg, _IOC_SIZE(cmd));
+ ret = nouveau_abi16_ioctl(filp, (void __user *)arg, _IOC_SIZE(cmd));
break;
default:
ret = drm_ioctl(file, cmd, arg);
@@ -1369,15 +1387,14 @@ nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
struct platform_device *pdev,
struct nvkm_device **pdevice)
{
- struct drm_device *drm;
+ struct nouveau_drm *drm;
int err;
- err = nvkm_device_tegra_new(func, pdev, nouveau_config, nouveau_debug,
- true, true, ~0ULL, pdevice);
+ err = nvkm_device_tegra_new(func, pdev, nouveau_config, nouveau_debug, pdevice);
if (err)
goto err_free;
- drm = drm_dev_alloc(&driver_platform, &pdev->dev);
+ drm = nouveau_drm_device_new(&driver_platform, &pdev->dev, *pdevice);
if (IS_ERR(drm)) {
err = PTR_ERR(drm);
goto err_free;
@@ -1387,12 +1404,10 @@ nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
if (err)
goto err_put;
- platform_set_drvdata(pdev, drm);
-
- return drm;
+ return drm->dev;
err_put:
- drm_dev_put(drm);
+ nouveau_drm_device_del(drm);
err_free:
nvkm_device_del(pdevice);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 25fca98a20bc..685d6ca3d8aa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -201,8 +201,13 @@ u_memcpya(uint64_t user, unsigned int nmemb, unsigned int size)
#include <nvif/parent.h>
struct nouveau_drm {
+ struct nvkm_device *nvkm;
struct nvif_parent parent;
- struct nouveau_cli master;
+ struct mutex client_mutex;
+ struct nvif_client _client;
+ struct nvif_device device;
+ struct nvif_mmu mmu;
+
struct nouveau_cli client;
struct drm_device *dev;
@@ -326,25 +331,28 @@ bool nouveau_pmops_runtime(void);
struct drm_device *
nouveau_platform_device_create(const struct nvkm_device_tegra_func *,
struct platform_device *, struct nvkm_device **);
-void nouveau_drm_device_remove(struct drm_device *dev);
+void nouveau_drm_device_remove(struct nouveau_drm *);
#define NV_PRINTK(l,c,f,a...) do { \
struct nouveau_cli *_cli = (c); \
dev_##l(_cli->drm->dev->dev, "%s: "f, _cli->name, ##a); \
} while(0)
-#define NV_FATAL(drm,f,a...) NV_PRINTK(crit, &(drm)->client, f, ##a)
-#define NV_ERROR(drm,f,a...) NV_PRINTK(err, &(drm)->client, f, ##a)
-#define NV_WARN(drm,f,a...) NV_PRINTK(warn, &(drm)->client, f, ##a)
-#define NV_INFO(drm,f,a...) NV_PRINTK(info, &(drm)->client, f, ##a)
+#define NV_PRINTK_(l,drm,f,a...) do { \
+ dev_##l((drm)->nvkm->dev, "drm: "f, ##a); \
+} while(0)
+#define NV_FATAL(drm,f,a...) NV_PRINTK_(crit, (drm), f, ##a)
+#define NV_ERROR(drm,f,a...) NV_PRINTK_(err, (drm), f, ##a)
+#define NV_WARN(drm,f,a...) NV_PRINTK_(warn, (drm), f, ##a)
+#define NV_INFO(drm,f,a...) NV_PRINTK_(info, (drm), f, ##a)
#define NV_DEBUG(drm,f,a...) do { \
if (drm_debug_enabled(DRM_UT_DRIVER)) \
- NV_PRINTK(info, &(drm)->client, f, ##a); \
+ NV_PRINTK_(info, (drm), f, ##a); \
} while(0)
#define NV_ATOMIC(drm,f,a...) do { \
if (drm_debug_enabled(DRM_UT_ATOMIC)) \
- NV_PRINTK(info, &(drm)->client, f, ##a); \
+ NV_PRINTK_(info, (drm), f, ##a); \
} while(0)
#define NV_PRINTK_ONCE(l,c,f,a...) NV_PRINTK(l##_once,c,f, ##a)
@@ -355,4 +363,41 @@ void nouveau_drm_device_remove(struct drm_device *dev);
extern int nouveau_modeset;
+/*XXX: Don't use these in new code.
+ *
+ * These accessors are used in a few places (mostly older code paths)
+ * to get direct access to NVKM structures, where a more well-defined
+ * interface doesn't exist. Outside of the current use, these should
+ * not be relied on; new interfaces should instead be implemented via NVIF.
+ *
+ * This is especially important when considering GSP-RM, as a lot of the
+ * modules don't exist, or are "stub" implementations that just allow
+ * the GSP-RM paths to be bootstrapped.
+ */
+#include <subdev/bios.h>
+#include <subdev/fb.h>
+#include <subdev/gpio.h>
+#include <subdev/clk.h>
+#include <subdev/i2c.h>
+#include <subdev/timer.h>
+#include <subdev/therm.h>
+
+static inline struct nvkm_device *
+nvxx_device(struct nouveau_drm *drm)
+{
+ return drm->nvkm;
+}
+
+#define nvxx_bios(a) nvxx_device(a)->bios
+#define nvxx_fb(a) nvxx_device(a)->fb
+#define nvxx_gpio(a) nvxx_device(a)->gpio
+#define nvxx_clk(a) nvxx_device(a)->clk
+#define nvxx_i2c(a) nvxx_device(a)->i2c
+#define nvxx_iccsense(a) nvxx_device(a)->iccsense
+#define nvxx_therm(a) nvxx_device(a)->therm
+#define nvxx_volt(a) nvxx_device(a)->volt
+
+#include <engine/gr.h>
+
+#define nvxx_gr(a) nvxx_device(a)->gr
#endif
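As an illustration of the header rework above, a hypothetical caller now needs only the struct nouveau_drm pointer both to log and to reach NVKM subdevices; nothing below is part of the patch itself:

	static void example_status(struct nouveau_drm *drm)
	{
		struct nvkm_device *device = nvxx_device(drm);	/* direct NVKM handle */

		/* NV_INFO() and friends now resolve through drm->nvkm->dev
		 * rather than through a nouveau_cli. */
		NV_INFO(drm, "fb: %s\n", device->fb ? "present" : "absent");
	}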
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index ba469767a20f..09686d038d60 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -181,8 +181,9 @@ nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc
void
nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
- struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
- struct nouveau_cli *cli = (void *)chan->user.client;
+ struct nouveau_cli *cli = chan->cli;
+ struct nouveau_drm *drm = cli->drm;
+ struct nouveau_fence_priv *priv = (void*)drm->fence;
struct {
struct nvif_event_v0 base;
struct nvif_chan_event_v0 host;
@@ -193,14 +194,14 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
INIT_LIST_HEAD(&fctx->flip);
INIT_LIST_HEAD(&fctx->pending);
spin_lock_init(&fctx->lock);
- fctx->context = chan->drm->runl[chan->runlist].context_base + chan->chid;
+ fctx->context = drm->runl[chan->runlist].context_base + chan->chid;
- if (chan == chan->drm->cechan)
+ if (chan == drm->cechan)
strcpy(fctx->name, "copy engine channel");
- else if (chan == chan->drm->channel)
+ else if (chan == drm->channel)
strcpy(fctx->name, "generic kernel channel");
else
- strcpy(fctx->name, nvxx_client(&cli->base)->name);
+ strcpy(fctx->name, cli->name);
kref_init(&fctx->fence_ref);
if (!priv->uevent)
@@ -221,7 +222,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
{
struct nouveau_channel *chan = unrcu_pointer(fence->channel);
struct nouveau_fence_chan *fctx = chan->fence;
- struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
+ struct nouveau_fence_priv *priv = (void*)chan->cli->drm->fence;
int ret;
fence->timeout = jiffies + (15 * HZ);
@@ -311,11 +312,39 @@ nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait)
return timeout - t;
}
+static int
+nouveau_fence_wait_busy(struct nouveau_fence *fence, bool intr)
+{
+ int ret = 0;
+
+ while (!nouveau_fence_done(fence)) {
+ if (time_after_eq(jiffies, fence->timeout)) {
+ ret = -EBUSY;
+ break;
+ }
+
+ __set_current_state(intr ?
+ TASK_INTERRUPTIBLE :
+ TASK_UNINTERRUPTIBLE);
+
+ if (intr && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+
+ __set_current_state(TASK_RUNNING);
+ return ret;
+}
+
int
-nouveau_fence_wait(struct nouveau_fence *fence, bool intr)
+nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
long ret;
+ if (!lazy)
+ return nouveau_fence_wait_busy(fence, intr);
+
ret = dma_fence_wait_timeout(&fence->base, intr, 15 * HZ);
if (ret < 0)
return ret;
@@ -354,7 +383,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
if (i == 0 && usage == DMA_RESV_USAGE_WRITE)
continue;
- f = nouveau_local_fence(fence, chan->drm);
+ f = nouveau_local_fence(fence, chan->cli->drm);
if (f) {
struct nouveau_channel *prev;
bool must_wait = true;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 1b63197b744a..8bc065acfe35 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -23,7 +23,7 @@ void nouveau_fence_unref(struct nouveau_fence **);
int nouveau_fence_emit(struct nouveau_fence *);
bool nouveau_fence_done(struct nouveau_fence *);
-int nouveau_fence_wait(struct nouveau_fence *, bool intr);
+int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
struct nouveau_fence_chan {
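nouveau_fence_wait() above gains a 'lazy' parameter: true keeps the dma_fence_wait_timeout() path, false uses the polling loop added in nouveau_fence.c. A hedged usage sketch, matching the call site updated in nouveau_gem.c further down:

	/* lazy = false: poll nouveau_fence_done() until signalled or timed out */
	ret = nouveau_fence_wait(fence, false, false);
	if (ret)	/* -EBUSY on timeout; -ERESTARTSYS only when intr is set */
		return ret;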
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 2e535caa7d6e..9ae2cee1c7c5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -567,10 +567,11 @@ retry:
}
static int
-validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
+validate_list(struct nouveau_channel *chan,
struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
- struct nouveau_drm *drm = chan->drm;
+ struct nouveau_cli *cli = chan->cli;
+ struct nouveau_drm *drm = cli->drm;
struct nouveau_bo *nvbo;
int ret, relocs = 0;
@@ -642,7 +643,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
return ret;
}
- ret = validate_list(chan, cli, &op->list, pbbo);
+ ret = validate_list(chan, &op->list, pbbo);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
NV_PRINTK(err, cli, "validating bo list\n");
@@ -870,7 +871,7 @@ revalidate:
}
} else
if (drm->client.device.info.chipset >= 0x25) {
- ret = PUSH_WAIT(chan->chan.push, req->nr_push * 2);
+ ret = PUSH_WAIT(&chan->chan.push, req->nr_push * 2);
if (ret) {
NV_PRINTK(err, cli, "cal_space: %d\n", ret);
goto out;
@@ -880,11 +881,11 @@ revalidate:
struct nouveau_bo *nvbo = (void *)(unsigned long)
bo[push[i].bo_index].user_priv;
- PUSH_CALL(chan->chan.push, nvbo->offset + push[i].offset);
- PUSH_DATA(chan->chan.push, 0);
+ PUSH_CALL(&chan->chan.push, nvbo->offset + push[i].offset);
+ PUSH_DATA(&chan->chan.push, 0);
}
} else {
- ret = PUSH_WAIT(chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
+ ret = PUSH_WAIT(&chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
if (ret) {
NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
goto out;
@@ -913,10 +914,10 @@ revalidate:
push[i].length - 8) / 4, cmd);
}
- PUSH_JUMP(chan->chan.push, nvbo->offset + push[i].offset);
- PUSH_DATA(chan->chan.push, 0);
+ PUSH_JUMP(&chan->chan.push, nvbo->offset + push[i].offset);
+ PUSH_DATA(&chan->chan.push, 0);
for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
- PUSH_DATA(chan->chan.push, 0);
+ PUSH_DATA(&chan->chan.push, 0);
}
}
@@ -928,7 +929,7 @@ revalidate:
}
if (sync) {
- if (!(ret = nouveau_fence_wait(fence, false))) {
+ if (!(ret = nouveau_fence_wait(fence, false, false))) {
if ((ret = dma_fence_get_status(&fence->base)) == 1)
ret = 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index db30a4c2cd4d..5c07a9ee8b77 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -52,7 +52,7 @@ nouveau_hwmon_temp1_auto_point1_temp(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
return sysfs_emit(buf, "%d\n",
therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST) * 1000);
@@ -64,7 +64,7 @@ nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
long value;
if (kstrtol(buf, 10, &value))
@@ -85,7 +85,7 @@ nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
return sysfs_emit(buf, "%d\n",
therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
@@ -97,7 +97,7 @@ nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
long value;
if (kstrtol(buf, 10, &value))
@@ -118,7 +118,7 @@ nouveau_hwmon_get_pwm1_max(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
int ret;
ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MAX_DUTY);
@@ -134,7 +134,7 @@ nouveau_hwmon_get_pwm1_min(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
int ret;
ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MIN_DUTY);
@@ -150,7 +150,7 @@ nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
long value;
int ret;
@@ -173,7 +173,7 @@ nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
long value;
int ret;
@@ -247,7 +247,7 @@ static umode_t
nouveau_power_is_visible(const void *data, u32 attr, int channel)
{
struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data);
- struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
+ struct nvkm_iccsense *iccsense = nvxx_iccsense(drm);
if (!iccsense || !iccsense->data_valid || list_empty(&iccsense->rails))
return 0;
@@ -272,7 +272,7 @@ static umode_t
nouveau_temp_is_visible(const void *data, u32 attr, int channel)
{
struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
if (!therm || !therm->attr_get || nvkm_therm_temp_get(therm) < 0)
return 0;
@@ -296,7 +296,7 @@ static umode_t
nouveau_pwm_is_visible(const void *data, u32 attr, int channel)
{
struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
if (!therm || !therm->attr_get || !therm->fan_get ||
therm->fan_get(therm) < 0)
@@ -315,7 +315,7 @@ static umode_t
nouveau_input_is_visible(const void *data, u32 attr, int channel)
{
struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data);
- struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
+ struct nvkm_volt *volt = nvxx_volt(drm);
if (!volt || nvkm_volt_get(volt) < 0)
return 0;
@@ -335,7 +335,7 @@ static umode_t
nouveau_fan_is_visible(const void *data, u32 attr, int channel)
{
struct nouveau_drm *drm = nouveau_drm((struct drm_device *)data);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
if (!therm || !therm->attr_get || nvkm_therm_fan_sense(therm) < 0)
return 0;
@@ -367,7 +367,7 @@ nouveau_temp_read(struct device *dev, u32 attr, int channel, long *val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
int ret;
if (!therm || !therm->attr_get)
@@ -416,7 +416,7 @@ nouveau_fan_read(struct device *dev, u32 attr, int channel, long *val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
if (!therm)
return -EOPNOTSUPP;
@@ -439,7 +439,7 @@ nouveau_in_read(struct device *dev, u32 attr, int channel, long *val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
+ struct nvkm_volt *volt = nvxx_volt(drm);
int ret;
if (!volt)
@@ -470,7 +470,7 @@ nouveau_pwm_read(struct device *dev, u32 attr, int channel, long *val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
if (!therm || !therm->attr_get || !therm->fan_get)
return -EOPNOTSUPP;
@@ -496,7 +496,7 @@ nouveau_power_read(struct device *dev, u32 attr, int channel, long *val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
+ struct nvkm_iccsense *iccsense = nvxx_iccsense(drm);
if (!iccsense)
return -EOPNOTSUPP;
@@ -525,7 +525,7 @@ nouveau_temp_write(struct device *dev, u32 attr, int channel, long val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
if (!therm || !therm->attr_set)
return -EOPNOTSUPP;
@@ -559,7 +559,7 @@ nouveau_pwm_write(struct device *dev, u32 attr, int channel, long val)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+ struct nvkm_therm *therm = nvxx_therm(drm);
if (!therm || !therm->attr_set)
return -EOPNOTSUPP;
@@ -664,9 +664,9 @@ nouveau_hwmon_init(struct drm_device *dev)
{
#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
- struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
- struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
+ struct nvkm_iccsense *iccsense = nvxx_iccsense(drm);
+ struct nvkm_therm *therm = nvxx_therm(drm);
+ struct nvkm_volt *volt = nvxx_volt(drm);
const struct attribute_group *special_groups[N_ATTR_GROUPS];
struct nouveau_hwmon *hwmon;
struct device *hwmon_dev;
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.c b/drivers/gpu/drm/nouveau/nouveau_led.c
index 2c5e0628da12..ac950518a820 100644
--- a/drivers/gpu/drm/nouveau/nouveau_led.c
+++ b/drivers/gpu/drm/nouveau/nouveau_led.c
@@ -78,7 +78,7 @@ int
nouveau_led_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
+ struct nvkm_gpio *gpio = nvxx_gpio(drm);
struct dcb_gpio_func logo_led;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 25f31d5169e5..fac92fdbf9cc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -78,20 +78,19 @@ nouveau_mem_map(struct nouveau_mem *mem,
void
nouveau_mem_fini(struct nouveau_mem *mem)
{
- nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[1]);
- nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[0]);
- mutex_lock(&mem->cli->drm->master.lock);
+ nvif_vmm_put(&mem->drm->client.vmm.vmm, &mem->vma[1]);
+ nvif_vmm_put(&mem->drm->client.vmm.vmm, &mem->vma[0]);
+ mutex_lock(&mem->drm->client_mutex);
nvif_mem_dtor(&mem->mem);
- mutex_unlock(&mem->cli->drm->master.lock);
+ mutex_unlock(&mem->drm->client_mutex);
}
int
nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
{
struct nouveau_mem *mem = nouveau_mem(reg);
- struct nouveau_cli *cli = mem->cli;
- struct nouveau_drm *drm = cli->drm;
- struct nvif_mmu *mmu = &cli->mmu;
+ struct nouveau_drm *drm = mem->drm;
+ struct nvif_mmu *mmu = &drm->mmu;
struct nvif_mem_ram_v0 args = {};
u8 type;
int ret;
@@ -114,11 +113,11 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
else
args.dma = tt->dma_address;
- mutex_lock(&drm->master.lock);
- ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT,
+ mutex_lock(&drm->client_mutex);
+ ret = nvif_mem_ctor_type(mmu, "ttmHostMem", mmu->mem, type, PAGE_SHIFT,
reg->size,
&args, sizeof(args), &mem->mem);
- mutex_unlock(&drm->master.lock);
+ mutex_unlock(&drm->client_mutex);
return ret;
}
@@ -126,16 +125,15 @@ int
nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
{
struct nouveau_mem *mem = nouveau_mem(reg);
- struct nouveau_cli *cli = mem->cli;
- struct nouveau_drm *drm = cli->drm;
- struct nvif_mmu *mmu = &cli->mmu;
+ struct nouveau_drm *drm = mem->drm;
+ struct nvif_mmu *mmu = &drm->mmu;
u64 size = ALIGN(reg->size, 1 << page);
int ret;
- mutex_lock(&drm->master.lock);
- switch (cli->mem->oclass) {
+ mutex_lock(&drm->client_mutex);
+ switch (mmu->mem) {
case NVIF_CLASS_MEM_GF100:
- ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
+ ret = nvif_mem_ctor_type(mmu, "ttmVram", mmu->mem,
drm->ttm.type_vram, page, size,
&(struct gf100_mem_v0) {
.contig = contig,
@@ -143,7 +141,7 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
&mem->mem);
break;
case NVIF_CLASS_MEM_NV50:
- ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass,
+ ret = nvif_mem_ctor_type(mmu, "ttmVram", mmu->mem,
drm->ttm.type_vram, page, size,
&(struct nv50_mem_v0) {
.bankswz = mmu->kind[mem->kind] == 2,
@@ -156,7 +154,7 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
WARN_ON(1);
break;
}
- mutex_unlock(&drm->master.lock);
+ mutex_unlock(&drm->client_mutex);
reg->start = mem->mem.addr >> PAGE_SHIFT;
return ret;
@@ -173,7 +171,7 @@ nouveau_mem_del(struct ttm_resource_manager *man, struct ttm_resource *reg)
}
int
-nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
+nouveau_mem_new(struct nouveau_drm *drm, u8 kind, u8 comp,
struct ttm_resource **res)
{
struct nouveau_mem *mem;
@@ -181,7 +179,7 @@ nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
return -ENOMEM;
- mem->cli = cli;
+ mem->drm = drm;
mem->kind = kind;
mem->comp = comp;
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h
index 5365a3d3a17f..a070ee049f6b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
@@ -8,7 +8,7 @@ struct ttm_tt;
struct nouveau_mem {
struct ttm_resource base;
- struct nouveau_cli *cli;
+ struct nouveau_drm *drm;
u8 kind;
u8 comp;
struct nvif_mem mem;
@@ -21,7 +21,7 @@ nouveau_mem(struct ttm_resource *reg)
return container_of(reg, struct nouveau_mem, base);
}
-int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
+int nouveau_mem_new(struct nouveau_drm *, u8 kind, u8 comp,
struct ttm_resource **);
void nouveau_mem_del(struct ttm_resource_manager *man,
struct ttm_resource *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c
index 1d49ebdfd5dc..adb802421fda 100644
--- a/drivers/gpu/drm/nouveau/nouveau_nvif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c
@@ -35,7 +35,6 @@
#include <nvif/ioctl.h>
#include "nouveau_drv.h"
-#include "nouveau_usif.h"
static void
nvkm_client_unmap(void *priv, void __iomem *ptr, u32 size)
@@ -98,5 +97,4 @@ nvif_driver_nvkm = {
.ioctl = nvkm_client_ioctl,
.map = nvkm_client_map,
.unmap = nvkm_client_unmap,
- .keep = false,
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index bf2dc7567ea4..829fdc6e4031 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -26,7 +26,6 @@ static int nouveau_platform_probe(struct platform_device *pdev)
const struct nvkm_device_tegra_func *func;
struct nvkm_device *device = NULL;
struct drm_device *drm;
- int ret;
func = of_device_get_match_data(&pdev->dev);
@@ -34,19 +33,14 @@ static int nouveau_platform_probe(struct platform_device *pdev)
if (IS_ERR(drm))
return PTR_ERR(drm);
- ret = drm_dev_register(drm, 0);
- if (ret < 0) {
- drm_dev_put(drm);
- return ret;
- }
-
return 0;
}
static void nouveau_platform_remove(struct platform_device *pdev)
{
- struct drm_device *dev = platform_get_drvdata(pdev);
- nouveau_drm_device_remove(dev);
+ struct nouveau_drm *drm = platform_get_drvdata(pdev);
+
+ nouveau_drm_device_remove(drm);
}
#if IS_ENABLED(CONFIG_OF)
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index b58ab595faf8..cd95446d6851 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -64,7 +64,8 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
* to the caller, instead of a normal nouveau_bo ttm reference. */
ret = drm_gem_object_init(dev, &nvbo->bo.base, size);
if (ret) {
- nouveau_bo_ref(NULL, &nvbo);
+ drm_gem_object_release(&nvbo->bo.base);
+ kfree(nvbo);
obj = ERR_PTR(-ENOMEM);
goto unlock;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index 32fa2e273965..eb6c3f9a01f5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -379,7 +379,7 @@ nouveau_sched_timedout_job(struct drm_sched_job *sched_job)
else
NV_PRINTK(warn, job->cli, "Generic job timeout.\n");
- drm_sched_start(sched, true);
+ drm_sched_start(sched);
return stat;
}
@@ -404,7 +404,7 @@ nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
{
struct drm_gpu_scheduler *drm_sched = &sched->base;
struct drm_sched_entity *entity = &sched->entity;
- long job_hang_limit = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS);
+ const long timeout = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS);
int ret;
if (!wq) {
@@ -418,7 +418,7 @@ nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
ret = drm_sched_init(drm_sched, &nouveau_sched_ops, wq,
NOUVEAU_SCHED_PRIORITY_COUNT,
- credit_limit, 0, job_hang_limit,
+ credit_limit, 0, timeout,
NULL, NULL, "nouveau_sched", drm->dev->dev);
if (ret)
goto fail_wq;
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index b14895f75b3c..bd870028514b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -43,7 +43,7 @@ nouveau_sgdma_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resou
return ret;
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
- ret = nouveau_mem_map(mem, &mem->cli->vmm.vmm, &mem->vma[0]);
+ ret = nouveau_mem_map(mem, &drm->client.vmm.vmm, &mem->vma[0]);
if (ret) {
nouveau_mem_fini(mem);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 486f39f31a38..e244927eb5d4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -73,7 +73,7 @@ nouveau_vram_manager_new(struct ttm_resource_manager *man,
if (drm->client.device.info.ram_size == 0)
return -ENOMEM;
- ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
+ ret = nouveau_mem_new(drm, nvbo->kind, nvbo->comp, res);
if (ret)
return ret;
@@ -105,7 +105,7 @@ nouveau_gart_manager_new(struct ttm_resource_manager *man,
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
int ret;
- ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
+ ret = nouveau_mem_new(drm, nvbo->kind, nvbo->comp, res);
if (ret)
return ret;
@@ -132,13 +132,13 @@ nv04_gart_manager_new(struct ttm_resource_manager *man,
struct nouveau_mem *mem;
int ret;
- ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
+ ret = nouveau_mem_new(drm, nvbo->kind, nvbo->comp, res);
if (ret)
return ret;
mem = nouveau_mem(*res);
ttm_resource_init(bo, place, *res);
- ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
+ ret = nvif_vmm_get(&drm->client.vmm.vmm, PTES, false, 12, 0,
(long)(*res)->size, &mem->vma[0]);
if (ret) {
nouveau_mem_del(man, *res);
@@ -261,7 +261,7 @@ nouveau_ttm_fini_gtt(struct nouveau_drm *drm)
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
- struct nvkm_device *device = nvxx_device(&drm->client.device);
+ struct nvkm_device *device = nvxx_device(drm);
struct nvkm_pci *pci = device->pci;
struct nvif_mmu *mmu = &drm->client.mmu;
struct drm_device *dev = drm->dev;
@@ -348,7 +348,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
- struct nvkm_device *device = nvxx_device(&drm->client.device);
+ struct nvkm_device *device = nvxx_device(drm);
nouveau_ttm_fini_vram(drm);
nouveau_ttm_fini_gtt(drm);
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
deleted file mode 100644
index 002d1479ba89..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Copyright 2014 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs <[email protected]>
- */
-
-#include "nouveau_drv.h"
-#include "nouveau_usif.h"
-#include "nouveau_abi16.h"
-
-#include <nvif/unpack.h>
-#include <nvif/client.h>
-#include <nvif/ioctl.h>
-
-#include <nvif/class.h>
-#include <nvif/cl0080.h>
-
-struct usif_object {
- struct list_head head;
- u8 route;
- u64 token;
-};
-
-static void
-usif_object_dtor(struct usif_object *object)
-{
- list_del(&object->head);
- kfree(object);
-}
-
-static int
-usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc, bool parent_abi16)
-{
- struct nouveau_cli *cli = nouveau_cli(f);
- struct nvif_client *client = &cli->base;
- union {
- struct nvif_ioctl_new_v0 v0;
- } *args = data;
- struct usif_object *object;
- int ret = -ENOSYS;
-
- if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true)))
- return ret;
-
- switch (args->v0.oclass) {
- case NV_DMA_FROM_MEMORY:
- case NV_DMA_TO_MEMORY:
- case NV_DMA_IN_MEMORY:
- return -EINVAL;
- case NV_DEVICE: {
- union {
- struct nv_device_v0 v0;
- } *args = data;
-
- if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false)))
- return ret;
-
- args->v0.priv = false;
- break;
- }
- default:
- if (!parent_abi16)
- return -EINVAL;
- break;
- }
-
- if (!(object = kmalloc(sizeof(*object), GFP_KERNEL)))
- return -ENOMEM;
- list_add(&object->head, &cli->objects);
-
- object->route = args->v0.route;
- object->token = args->v0.token;
- args->v0.route = NVDRM_OBJECT_USIF;
- args->v0.token = (unsigned long)(void *)object;
- ret = nvif_client_ioctl(client, argv, argc);
- if (ret) {
- usif_object_dtor(object);
- return ret;
- }
-
- args->v0.token = object->token;
- args->v0.route = object->route;
- return 0;
-}
-
-int
-usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
-{
- struct nouveau_cli *cli = nouveau_cli(filp);
- struct nvif_client *client = &cli->base;
- void *data = kmalloc(argc, GFP_KERNEL);
- u32 size = argc;
- union {
- struct nvif_ioctl_v0 v0;
- } *argv = data;
- struct usif_object *object;
- bool abi16 = false;
- u8 owner;
- int ret;
-
- if (ret = -ENOMEM, !argv)
- goto done;
- if (ret = -EFAULT, copy_from_user(argv, user, size))
- goto done;
-
- if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) {
- /* block access to objects not created via this interface */
- owner = argv->v0.owner;
- if (argv->v0.object == 0ULL &&
- argv->v0.type != NVIF_IOCTL_V0_DEL)
- argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
- else
- argv->v0.owner = NVDRM_OBJECT_USIF;
- } else
- goto done;
-
- /* USIF slightly abuses some return-only ioctl members in order
- * to provide interoperability with the older ABI16 objects
- */
- mutex_lock(&cli->mutex);
- if (argv->v0.route) {
- if (ret = -EINVAL, argv->v0.route == 0xff)
- ret = nouveau_abi16_usif(filp, argv, argc);
- if (ret) {
- mutex_unlock(&cli->mutex);
- goto done;
- }
-
- abi16 = true;
- }
-
- switch (argv->v0.type) {
- case NVIF_IOCTL_V0_NEW:
- ret = usif_object_new(filp, data, size, argv, argc, abi16);
- break;
- default:
- ret = nvif_client_ioctl(client, argv, argc);
- break;
- }
- if (argv->v0.route == NVDRM_OBJECT_USIF) {
- object = (void *)(unsigned long)argv->v0.token;
- argv->v0.route = object->route;
- argv->v0.token = object->token;
- if (ret == 0 && argv->v0.type == NVIF_IOCTL_V0_DEL) {
- list_del(&object->head);
- kfree(object);
- }
- } else {
- argv->v0.route = NVIF_IOCTL_V0_ROUTE_HIDDEN;
- argv->v0.token = 0;
- }
- argv->v0.owner = owner;
- mutex_unlock(&cli->mutex);
-
- if (copy_to_user(user, argv, argc))
- ret = -EFAULT;
-done:
- kfree(argv);
- return ret;
-}
-
-void
-usif_client_fini(struct nouveau_cli *cli)
-{
- struct usif_object *object, *otemp;
-
- list_for_each_entry_safe(object, otemp, &cli->objects, head) {
- usif_object_dtor(object);
- }
-}
-
-void
-usif_client_init(struct nouveau_cli *cli)
-{
- INIT_LIST_HEAD(&cli->objects);
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.h b/drivers/gpu/drm/nouveau/nouveau_usif.h
deleted file mode 100644
index dc90d4a9d0d9..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_usif.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NOUVEAU_USIF_H__
-#define __NOUVEAU_USIF_H__
-
-void usif_client_init(struct nouveau_cli *);
-void usif_client_fini(struct nouveau_cli *);
-int usif_ioctl(struct drm_file *, void __user *, u32);
-int usif_notify(const void *, u32, const void *, u32);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 9402fa320a7e..48f105239f42 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1803,6 +1803,7 @@ nouveau_uvmm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
{
struct nouveau_bo *nvbo = nouveau_gem_object(vm_bo->obj);
+ nouveau_bo_placement_set(nvbo, nvbo->valid_domains, 0);
return nouveau_bo_validate(nvbo, true, false);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index f8bf0ec26844..2525e08938b3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -11,7 +11,7 @@
static unsigned int
nouveau_vga_set_decode(struct pci_dev *pdev, bool state)
{
- struct nouveau_drm *drm = nouveau_drm(pci_get_drvdata(pdev));
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
struct nvif_object *device = &drm->client.device.object;
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE &&
@@ -34,7 +34,8 @@ static void
nouveau_switcheroo_set_state(struct pci_dev *pdev,
enum vga_switcheroo_state state)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
+ struct drm_device *dev = drm->dev;
if ((nouveau_is_optimus() || nouveau_is_v1_dsm()) && state == VGA_SWITCHEROO_OFF)
return;
@@ -56,21 +57,22 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
static void
nouveau_switcheroo_reprobe(struct pci_dev *pdev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
- drm_fb_helper_output_poll_changed(dev);
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
+
+ drm_fb_helper_output_poll_changed(drm->dev);
}
static bool
nouveau_switcheroo_can_switch(struct pci_dev *pdev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = pci_get_drvdata(pdev);
/*
* FIXME: open_count is protected by drm_global_mutex but that would lead to
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return atomic_read(&dev->open_count) == 0;
+ return atomic_read(&drm->dev->open_count) == 0;
}
static const struct vga_switcheroo_client_ops
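A recurring pattern in the PCI, PM and switcheroo callbacks above: the driver data is now the struct nouveau_drm pointer itself rather than the drm_device. A minimal hypothetical callback sketch:

	static void example_pci_callback(struct pci_dev *pdev)
	{
		struct nouveau_drm *drm = pci_get_drvdata(pdev);
		struct drm_device *dev = drm->dev;	/* when a drm_device is still needed */

		NV_DEBUG(drm, "open_count=%d\n", atomic_read(&dev->open_count));
	}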
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index cdbc75e3d1f6..fa5c6029f783 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -39,7 +39,7 @@ struct nv04_fence_priv {
static int
nv04_fence_emit(struct nouveau_fence *fence)
{
- struct nvif_push *push = unrcu_pointer(fence->channel)->chan.push;
+ struct nvif_push *push = &unrcu_pointer(fence->channel)->chan.push;
int ret = PUSH_WAIT(push, 2);
if (ret == 0) {
PUSH_NVSQ(push, NV_SW, 0x0150, fence->base.seqno);
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index c6a0db5b9e21..8c73f40e3bda 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -32,7 +32,7 @@
int
nv10_fence_emit(struct nouveau_fence *fence)
{
- struct nvif_push *push = fence->channel->chan.push;
+ struct nvif_push *push = &fence->channel->chan.push;
int ret = PUSH_WAIT(push, 2);
if (ret == 0) {
PUSH_MTHD(push, NV06E, SET_REFERENCE, fence->base.seqno);
@@ -88,7 +88,7 @@ nv10_fence_destroy(struct nouveau_drm *drm)
nouveau_bo_unmap(priv->bo);
if (priv->bo)
nouveau_bo_unpin(priv->bo);
- nouveau_bo_ref(NULL, &priv->bo);
+ nouveau_bo_fini(priv->bo);
drm->fence = NULL;
kfree(priv);
}
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 07c2e0878c24..d09bfd11369f 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -36,11 +36,11 @@ int
nv17_fence_sync(struct nouveau_fence *fence,
struct nouveau_channel *prev, struct nouveau_channel *chan)
{
- struct nouveau_cli *cli = (void *)prev->user.client;
- struct nv10_fence_priv *priv = chan->drm->fence;
+ struct nouveau_cli *cli = prev->cli;
+ struct nv10_fence_priv *priv = cli->drm->fence;
struct nv10_fence_chan *fctx = chan->fence;
- struct nvif_push *ppush = prev->chan.push;
- struct nvif_push *npush = chan->chan.push;
+ struct nvif_push *ppush = &prev->chan.push;
+ struct nvif_push *npush = &chan->chan.push;
u32 value;
int ret;
@@ -76,7 +76,7 @@ nv17_fence_sync(struct nouveau_fence *fence,
static int
nv17_fence_context_new(struct nouveau_channel *chan)
{
- struct nv10_fence_priv *priv = chan->drm->fence;
+ struct nv10_fence_priv *priv = chan->cli->drm->fence;
struct ttm_resource *reg = priv->bo->bo.resource;
struct nv10_fence_chan *fctx;
u32 start = reg->start * PAGE_SIZE;
@@ -141,7 +141,7 @@ nv17_fence_create(struct nouveau_drm *drm)
nouveau_bo_unpin(priv->bo);
}
if (ret)
- nouveau_bo_ref(NULL, &priv->bo);
+ nouveau_bo_fini(priv->bo);
}
if (ret) {
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index ea1e1f480bfe..62e28dddf87c 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -35,7 +35,7 @@
static int
nv50_fence_context_new(struct nouveau_channel *chan)
{
- struct nv10_fence_priv *priv = chan->drm->fence;
+ struct nv10_fence_priv *priv = chan->cli->drm->fence;
struct nv10_fence_chan *fctx;
struct ttm_resource *reg = priv->bo->bo.resource;
u32 start = reg->start * PAGE_SIZE;
@@ -92,7 +92,7 @@ nv50_fence_create(struct nouveau_drm *drm)
nouveau_bo_unpin(priv->bo);
}
if (ret)
- nouveau_bo_ref(NULL, &priv->bo);
+ nouveau_bo_fini(priv->bo);
}
if (ret) {
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 812b8c62eeba..aa7dd0c5d917 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -35,7 +35,7 @@
static int
nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret = PUSH_WAIT(push, 8);
if (ret == 0) {
PUSH_MTHD(push, NV826F, SET_CONTEXT_DMA_SEMAPHORE, chan->vram.handle);
@@ -58,7 +58,7 @@ nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
static int
nv84_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret = PUSH_WAIT(push, 7);
if (ret == 0) {
PUSH_MTHD(push, NV826F, SET_CONTEXT_DMA_SEMAPHORE, chan->vram.handle);
@@ -79,7 +79,7 @@ nv84_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
static inline u32
nv84_fence_chid(struct nouveau_channel *chan)
{
- return chan->drm->runl[chan->runlist].chan_id_base + chan->chid;
+ return chan->cli->drm->runl[chan->runlist].chan_id_base + chan->chid;
}
static int
@@ -105,14 +105,14 @@ nv84_fence_sync(struct nouveau_fence *fence,
static u32
nv84_fence_read(struct nouveau_channel *chan)
{
- struct nv84_fence_priv *priv = chan->drm->fence;
+ struct nv84_fence_priv *priv = chan->cli->drm->fence;
return nouveau_bo_rd32(priv->bo, nv84_fence_chid(chan) * 16/4);
}
static void
nv84_fence_context_del(struct nouveau_channel *chan)
{
- struct nv84_fence_priv *priv = chan->drm->fence;
+ struct nv84_fence_priv *priv = chan->cli->drm->fence;
struct nv84_fence_chan *fctx = chan->fence;
nouveau_bo_wr32(priv->bo, nv84_fence_chid(chan) * 16 / 4, fctx->base.sequence);
@@ -127,7 +127,7 @@ nv84_fence_context_del(struct nouveau_channel *chan)
int
nv84_fence_context_new(struct nouveau_channel *chan)
{
- struct nv84_fence_priv *priv = chan->drm->fence;
+ struct nv84_fence_priv *priv = chan->cli->drm->fence;
struct nv84_fence_chan *fctx;
int ret;
@@ -188,7 +188,7 @@ nv84_fence_destroy(struct nouveau_drm *drm)
nouveau_bo_unmap(priv->bo);
if (priv->bo)
nouveau_bo_unpin(priv->bo);
- nouveau_bo_ref(NULL, &priv->bo);
+ nouveau_bo_fini(priv->bo);
drm->fence = NULL;
kfree(priv);
}
@@ -232,7 +232,7 @@ nv84_fence_create(struct nouveau_drm *drm)
nouveau_bo_unpin(priv->bo);
}
if (ret)
- nouveau_bo_ref(NULL, &priv->bo);
+ nouveau_bo_fini(priv->bo);
}
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index e1461c0b0779..a5e98d0d4217 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -34,7 +34,7 @@
static int
nvc0_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret = PUSH_WAIT(push, 6);
if (ret == 0) {
PUSH_MTHD(push, NV906F, SEMAPHOREA,
@@ -57,7 +57,7 @@ nvc0_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
static int
nvc0_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
- struct nvif_push *push = chan->chan.push;
+ struct nvif_push *push = &chan->chan.push;
int ret = PUSH_WAIT(push, 5);
if (ret == 0) {
PUSH_MTHD(push, NV906F, SEMAPHOREA,
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
index 3a27245f467f..fdf5054ed7d8 100644
--- a/drivers/gpu/drm/nouveau/nvif/client.c
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -30,12 +30,6 @@
#include <nvif/if0000.h>
int
-nvif_client_ioctl(struct nvif_client *client, void *data, u32 size)
-{
- return client->driver->ioctl(client->object.priv, data, size, NULL);
-}
-
-int
nvif_client_suspend(struct nvif_client *client)
{
return client->driver->suspend(client->object.priv);
@@ -51,22 +45,13 @@ void
nvif_client_dtor(struct nvif_client *client)
{
nvif_object_dtor(&client->object);
- if (client->driver) {
- if (client->driver->fini)
- client->driver->fini(client->object.priv);
- client->driver = NULL;
- }
+ client->driver = NULL;
}
int
-nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
- struct nvif_client *client)
+nvif_client_ctor(struct nvif_client *parent, const char *name, struct nvif_client *client)
{
- struct nvif_client_v0 args = { .device = device };
- struct {
- struct nvif_ioctl_v0 ioctl;
- struct nvif_ioctl_nop_v0 nop;
- } nop = {};
+ struct nvif_client_v0 args = {};
int ret;
strscpy_pad(args.name, name, sizeof(args.name));
@@ -79,15 +64,6 @@ nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
client->object.client = client;
client->object.handle = ~0;
- client->route = NVIF_IOCTL_V0_ROUTE_NVIF;
client->driver = parent->driver;
-
- if (ret == 0) {
- ret = nvif_client_ioctl(client, &nop, sizeof(nop));
- client->version = nop.nop.version;
- }
-
- if (ret)
- nvif_client_dtor(client);
- return ret;
+ return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvif/device.c b/drivers/gpu/drm/nouveau/nvif/device.c
index 8c3d883f3313..24880931039f 100644
--- a/drivers/gpu/drm/nouveau/nvif/device.c
+++ b/drivers/gpu/drm/nouveau/nvif/device.c
@@ -21,8 +21,8 @@
*
* Authors: Ben Skeggs <[email protected]>
*/
-
#include <nvif/device.h>
+#include <nvif/client.h>
u64
nvif_device_time(struct nvif_device *device)
@@ -38,6 +38,12 @@ nvif_device_time(struct nvif_device *device)
return device->user.func->time(&device->user);
}
+int
+nvif_device_map(struct nvif_device *device)
+{
+ return nvif_object_map(&device->object, NULL, 0);
+}
+
void
nvif_device_dtor(struct nvif_device *device)
{
@@ -48,11 +54,10 @@ nvif_device_dtor(struct nvif_device *device)
}
int
-nvif_device_ctor(struct nvif_object *parent, const char *name, u32 handle,
- s32 oclass, void *data, u32 size, struct nvif_device *device)
+nvif_device_ctor(struct nvif_client *client, const char *name, struct nvif_device *device)
{
- int ret = nvif_object_ctor(parent, name ? name : "nvifDevice", handle,
- oclass, data, size, &device->object);
+ int ret = nvif_object_ctor(&client->object, name ? name : "nvifDevice", 0,
+ 0x0080, NULL, 0, &device->object);
device->runlist = NULL;
device->user.func = NULL;
if (ret == 0) {
diff --git a/drivers/gpu/drm/nouveau/nvif/driver.c b/drivers/gpu/drm/nouveau/nvif/driver.c
index 5e00dd07afed..78706e97a6a2 100644
--- a/drivers/gpu/drm/nouveau/nvif/driver.c
+++ b/drivers/gpu/drm/nouveau/nvif/driver.c
@@ -24,35 +24,17 @@
#include <nvif/driver.h>
#include <nvif/client.h>
-static const struct nvif_driver *
-nvif_driver[] = {
-#ifdef __KERNEL__
- &nvif_driver_nvkm,
-#else
- &nvif_driver_drm,
- &nvif_driver_lib,
- &nvif_driver_null,
-#endif
- NULL
-};
-
int
nvif_driver_init(const char *drv, const char *cfg, const char *dbg,
const char *name, u64 device, struct nvif_client *client)
{
- int ret = -EINVAL, i;
+ int ret;
+
+ client->driver = &nvif_driver_nvkm;
- for (i = 0; (client->driver = nvif_driver[i]); i++) {
- if (!drv || !strcmp(client->driver->name, drv)) {
- ret = client->driver->init(name, device, cfg, dbg,
- &client->object.priv);
- if (ret == 0)
- break;
- client->driver->fini(client->object.priv);
- }
- }
+ ret = client->driver->init(name, device, cfg, dbg, &client->object.priv);
+ if (ret)
+ return ret;
- if (ret == 0)
- ret = nvif_client_ctor(client, name, device, client);
- return ret;
+ return nvif_client_ctor(client, name, client);
}
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
index 1d19c87eaec1..0b87278ac0f8 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.c
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -40,7 +40,6 @@ nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack)
args->v0.object = nvif_handle(object);
else
args->v0.object = 0;
- args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
} else
return -ENOSYS;
@@ -98,43 +97,6 @@ nvif_object_sclass_get(struct nvif_object *object, struct nvif_sclass **psclass)
return ret;
}
-u32
-nvif_object_rd(struct nvif_object *object, int size, u64 addr)
-{
- struct {
- struct nvif_ioctl_v0 ioctl;
- struct nvif_ioctl_rd_v0 rd;
- } args = {
- .ioctl.type = NVIF_IOCTL_V0_RD,
- .rd.size = size,
- .rd.addr = addr,
- };
- int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
- if (ret) {
- /*XXX: warn? */
- return 0;
- }
- return args.rd.data;
-}
-
-void
-nvif_object_wr(struct nvif_object *object, int size, u64 addr, u32 data)
-{
- struct {
- struct nvif_ioctl_v0 ioctl;
- struct nvif_ioctl_wr_v0 wr;
- } args = {
- .ioctl.type = NVIF_IOCTL_V0_WR,
- .wr.size = size,
- .wr.addr = addr,
- .wr.data = data,
- };
- int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
- if (ret) {
- /*XXX: warn? */
- }
-}
-
int
nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
{
@@ -299,8 +261,6 @@ nvif_object_ctor(struct nvif_object *parent, const char *name, u32 handle,
args->ioctl.version = 0;
args->ioctl.type = NVIF_IOCTL_V0_NEW;
args->new.version = 0;
- args->new.route = parent->client->route;
- args->new.token = nvif_handle(object);
args->new.object = nvif_handle(object);
args->new.handle = handle;
args->new.oclass = oclass;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c
index c55662937ab2..72c88db627a5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/client.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c
@@ -42,7 +42,7 @@ nvkm_uclient_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))){
args->v0.name[sizeof(args->v0.name) - 1] = 0;
- ret = nvkm_client_new(args->v0.name, args->v0.device, NULL,
+ ret = nvkm_client_new(args->v0.name, oclass->client->device, NULL,
NULL, oclass->client->event, &client);
if (ret)
return ret;
@@ -51,8 +51,6 @@ nvkm_uclient_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
client->object.client = oclass->client;
client->object.handle = oclass->handle;
- client->object.route = oclass->route;
- client->object.token = oclass->token;
client->object.object = oclass->object;
client->debug = oclass->client->debug;
*pobject = &client->object;
@@ -67,58 +65,6 @@ nvkm_uclient_sclass = {
.ctor = nvkm_uclient_new,
};
-static const struct nvkm_object_func nvkm_client;
-struct nvkm_client *
-nvkm_client_search(struct nvkm_client *client, u64 handle)
-{
- struct nvkm_object *object;
-
- object = nvkm_object_search(client, handle, &nvkm_client);
- if (IS_ERR(object))
- return (void *)object;
-
- return nvkm_client(object);
-}
-
-static int
-nvkm_client_mthd_devlist(struct nvkm_client *client, void *data, u32 size)
-{
- union {
- struct nvif_client_devlist_v0 v0;
- } *args = data;
- int ret = -ENOSYS;
-
- nvif_ioctl(&client->object, "client devlist size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
- nvif_ioctl(&client->object, "client devlist vers %d count %d\n",
- args->v0.version, args->v0.count);
- if (size == sizeof(args->v0.device[0]) * args->v0.count) {
- ret = nvkm_device_list(args->v0.device, args->v0.count);
- if (ret >= 0) {
- args->v0.count = ret;
- ret = 0;
- }
- } else {
- ret = -EINVAL;
- }
- }
-
- return ret;
-}
-
-static int
-nvkm_client_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
-{
- struct nvkm_client *client = nvkm_client(object);
- switch (mthd) {
- case NVIF_CLIENT_V0_DEVLIST:
- return nvkm_client_mthd_devlist(client, data, size);
- default:
- break;
- }
- return -EINVAL;
-}
-
static int
nvkm_client_child_new(const struct nvkm_oclass *oclass,
void *data, u32 size, struct nvkm_object **pobject)
@@ -144,12 +90,6 @@ nvkm_client_child_get(struct nvkm_object *object, int index,
return 0;
}
-static int
-nvkm_client_fini(struct nvkm_object *object, bool suspend)
-{
- return 0;
-}
-
static void *
nvkm_client_dtor(struct nvkm_object *object)
{
@@ -159,8 +99,6 @@ nvkm_client_dtor(struct nvkm_object *object)
static const struct nvkm_object_func
nvkm_client = {
.dtor = nvkm_client_dtor,
- .fini = nvkm_client_fini,
- .mthd = nvkm_client_mthd,
.sclass = nvkm_client_child_get,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
index 0b33287e43a7..45051a1249da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
@@ -33,18 +33,7 @@ static int
nvkm_ioctl_nop(struct nvkm_client *client,
struct nvkm_object *object, void *data, u32 size)
{
- union {
- struct nvif_ioctl_nop_v0 v0;
- } *args = data;
- int ret = -ENOSYS;
-
- nvif_ioctl(object, "nop size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(object, "nop vers %lld\n", args->v0.version);
- args->v0.version = NVIF_VERSION_LATEST;
- }
-
- return ret;
+ return -ENOSYS;
}
#include <nvif/class.h>
@@ -112,10 +101,9 @@ nvkm_ioctl_new(struct nvkm_client *client,
nvif_ioctl(parent, "new size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
- nvif_ioctl(parent, "new vers %d handle %08x class %08x "
- "route %02x token %llx object %016llx\n",
+ nvif_ioctl(parent, "new vers %d handle %08x class %08x object %016llx\n",
args->v0.version, args->v0.handle, args->v0.oclass,
- args->v0.route, args->v0.token, args->v0.object);
+ args->v0.object);
} else
return ret;
@@ -127,8 +115,6 @@ nvkm_ioctl_new(struct nvkm_client *client,
do {
memset(&oclass, 0x00, sizeof(oclass));
oclass.handle = args->v0.handle;
- oclass.route = args->v0.route;
- oclass.token = args->v0.token;
oclass.object = args->v0.object;
oclass.client = client;
oclass.parent = parent;
@@ -205,69 +191,14 @@ static int
nvkm_ioctl_rd(struct nvkm_client *client,
struct nvkm_object *object, void *data, u32 size)
{
- union {
- struct nvif_ioctl_rd_v0 v0;
- } *args = data;
- union {
- u8 b08;
- u16 b16;
- u32 b32;
- } v;
- int ret = -ENOSYS;
-
- nvif_ioctl(object, "rd size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(object, "rd vers %d size %d addr %016llx\n",
- args->v0.version, args->v0.size, args->v0.addr);
- switch (args->v0.size) {
- case 1:
- ret = nvkm_object_rd08(object, args->v0.addr, &v.b08);
- args->v0.data = v.b08;
- break;
- case 2:
- ret = nvkm_object_rd16(object, args->v0.addr, &v.b16);
- args->v0.data = v.b16;
- break;
- case 4:
- ret = nvkm_object_rd32(object, args->v0.addr, &v.b32);
- args->v0.data = v.b32;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- }
-
- return ret;
+ return -ENOSYS;
}
static int
nvkm_ioctl_wr(struct nvkm_client *client,
struct nvkm_object *object, void *data, u32 size)
{
- union {
- struct nvif_ioctl_wr_v0 v0;
- } *args = data;
- int ret = -ENOSYS;
-
- nvif_ioctl(object, "wr size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(object,
- "wr vers %d size %d addr %016llx data %08x\n",
- args->v0.version, args->v0.size, args->v0.addr,
- args->v0.data);
- } else
- return ret;
-
- switch (args->v0.size) {
- case 1: return nvkm_object_wr08(object, args->v0.addr, args->v0.data);
- case 2: return nvkm_object_wr16(object, args->v0.addr, args->v0.data);
- case 4: return nvkm_object_wr32(object, args->v0.addr, args->v0.data);
- default:
- break;
- }
-
- return -EINVAL;
+ return -ENOSYS;
}
static int
@@ -331,7 +262,7 @@ nvkm_ioctl_v0[] = {
static int
nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type,
- void *data, u32 size, u8 owner, u8 *route, u64 *token)
+ void *data, u32 size)
{
struct nvkm_object *object;
int ret;
@@ -342,13 +273,6 @@ nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type,
return PTR_ERR(object);
}
- if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != object->route) {
- nvif_ioctl(&client->object, "route != owner\n");
- return -EACCES;
- }
- *route = object->route;
- *token = object->token;
-
if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) {
if (nvkm_ioctl_v0[type].version == 0)
ret = nvkm_ioctl_v0[type].func(client, object, data, size);
@@ -374,8 +298,7 @@ nvkm_ioctl(struct nvkm_client *client, void *data, u32 size, void **hack)
args->v0.version, args->v0.type, args->v0.object,
args->v0.owner);
ret = nvkm_ioctl_path(client, args->v0.object, args->v0.type,
- data, size, args->v0.owner,
- &args->v0.route, &args->v0.token);
+ data, size);
}
if (ret != 1) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c
index aea3ba72027a..390c265cf8af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/object.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c
@@ -133,54 +133,6 @@ nvkm_object_unmap(struct nvkm_object *object)
}
int
-nvkm_object_rd08(struct nvkm_object *object, u64 addr, u8 *data)
-{
- if (likely(object->func->rd08))
- return object->func->rd08(object, addr, data);
- return -ENODEV;
-}
-
-int
-nvkm_object_rd16(struct nvkm_object *object, u64 addr, u16 *data)
-{
- if (likely(object->func->rd16))
- return object->func->rd16(object, addr, data);
- return -ENODEV;
-}
-
-int
-nvkm_object_rd32(struct nvkm_object *object, u64 addr, u32 *data)
-{
- if (likely(object->func->rd32))
- return object->func->rd32(object, addr, data);
- return -ENODEV;
-}
-
-int
-nvkm_object_wr08(struct nvkm_object *object, u64 addr, u8 data)
-{
- if (likely(object->func->wr08))
- return object->func->wr08(object, addr, data);
- return -ENODEV;
-}
-
-int
-nvkm_object_wr16(struct nvkm_object *object, u64 addr, u16 data)
-{
- if (likely(object->func->wr16))
- return object->func->wr16(object, addr, data);
- return -ENODEV;
-}
-
-int
-nvkm_object_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
- if (likely(object->func->wr32))
- return object->func->wr32(object, addr, data);
- return -ENODEV;
-}
-
-int
nvkm_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *gpuobj,
int align, struct nvkm_gpuobj **pgpuobj)
{
@@ -313,8 +265,6 @@ nvkm_object_ctor(const struct nvkm_object_func *func,
object->engine = nvkm_engine_ref(oclass->engine);
object->oclass = oclass->base.oclass;
object->handle = oclass->handle;
- object->route = oclass->route;
- object->token = oclass->token;
object->object = oclass->object;
INIT_LIST_HEAD(&object->head);
INIT_LIST_HEAD(&object->tree);
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
index 3385528da650..5db80d1780f0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
@@ -56,42 +56,6 @@ nvkm_oproxy_unmap(struct nvkm_object *object)
}
static int
-nvkm_oproxy_rd08(struct nvkm_object *object, u64 addr, u8 *data)
-{
- return nvkm_object_rd08(nvkm_oproxy(object)->object, addr, data);
-}
-
-static int
-nvkm_oproxy_rd16(struct nvkm_object *object, u64 addr, u16 *data)
-{
- return nvkm_object_rd16(nvkm_oproxy(object)->object, addr, data);
-}
-
-static int
-nvkm_oproxy_rd32(struct nvkm_object *object, u64 addr, u32 *data)
-{
- return nvkm_object_rd32(nvkm_oproxy(object)->object, addr, data);
-}
-
-static int
-nvkm_oproxy_wr08(struct nvkm_object *object, u64 addr, u8 data)
-{
- return nvkm_object_wr08(nvkm_oproxy(object)->object, addr, data);
-}
-
-static int
-nvkm_oproxy_wr16(struct nvkm_object *object, u64 addr, u16 data)
-{
- return nvkm_object_wr16(nvkm_oproxy(object)->object, addr, data);
-}
-
-static int
-nvkm_oproxy_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
- return nvkm_object_wr32(nvkm_oproxy(object)->object, addr, data);
-}
-
-static int
nvkm_oproxy_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
int align, struct nvkm_gpuobj **pgpuobj)
{
@@ -197,12 +161,6 @@ nvkm_oproxy_func = {
.ntfy = nvkm_oproxy_ntfy,
.map = nvkm_oproxy_map,
.unmap = nvkm_oproxy_unmap,
- .rd08 = nvkm_oproxy_rd08,
- .rd16 = nvkm_oproxy_rd16,
- .rd32 = nvkm_oproxy_rd32,
- .wr08 = nvkm_oproxy_wr08,
- .wr16 = nvkm_oproxy_wr16,
- .wr32 = nvkm_oproxy_wr32,
.bind = nvkm_oproxy_bind,
.sclass = nvkm_oproxy_sclass,
.uevent = nvkm_oproxy_uevent,
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/uevent.c b/drivers/gpu/drm/nouveau/nvkm/core/uevent.c
index ba9d9edaec75..cc254c390a57 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/uevent.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/uevent.c
@@ -116,9 +116,9 @@ nvkm_uevent_ntfy(struct nvkm_event_ntfy *ntfy, u32 bits)
struct nvkm_client *client = uevent->object.client;
if (uevent->func)
- return uevent->func(uevent->parent, uevent->object.token, bits);
+ return uevent->func(uevent->parent, uevent->object.object, bits);
- return client->event(uevent->object.token, NULL, 0);
+ return client->event(uevent->object.object, NULL, 0);
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
index bfaaff645a34..2e48b0816670 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
@@ -19,7 +19,6 @@ include $(src)/nvkm/engine/nvenc/Kbuild
include $(src)/nvkm/engine/nvdec/Kbuild
include $(src)/nvkm/engine/nvjpg/Kbuild
include $(src)/nvkm/engine/ofa/Kbuild
-include $(src)/nvkm/engine/pm/Kbuild
include $(src)/nvkm/engine/sec/Kbuild
include $(src)/nvkm/engine/sec2/Kbuild
include $(src)/nvkm/engine/sw/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 31ed3da32fe7..9093d89b16f3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -53,26 +53,6 @@ nvkm_device_find(u64 handle)
return device;
}
-int
-nvkm_device_list(u64 *name, int size)
-{
- struct nvkm_device *device;
- int nr = 0;
- mutex_lock(&nv_devices_mutex);
- list_for_each_entry(device, &nv_devices, head) {
- if (nr++ < size)
- name[nr - 1] = device->handle;
- }
- mutex_unlock(&nv_devices_mutex);
- return nr;
-}
-
-static const struct nvkm_device_chip
-null_chipset = {
- .name = "NULL",
- .bios = { 0x00000001, nvkm_bios_new },
-};
-
static const struct nvkm_device_chip
nv4_chipset = {
.name = "NV04",
@@ -490,7 +470,6 @@ nv40_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv40_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -516,7 +495,6 @@ nv41_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv40_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -542,7 +520,6 @@ nv42_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv40_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -568,7 +545,6 @@ nv43_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv40_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -594,7 +570,6 @@ nv44_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -620,7 +595,6 @@ nv45_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -646,7 +620,6 @@ nv46_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -672,7 +645,6 @@ nv47_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -698,7 +670,6 @@ nv49_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -724,7 +695,6 @@ nv4a_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -750,7 +720,6 @@ nv4b_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv40_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -776,7 +745,6 @@ nv4c_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -802,7 +770,6 @@ nv4e_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -831,7 +798,6 @@ nv50_chipset = {
.fifo = { 0x00000001, nv50_fifo_new },
.gr = { 0x00000001, nv50_gr_new },
.mpeg = { 0x00000001, nv50_mpeg_new },
- .pm = { 0x00000001, nv50_pm_new },
.sw = { 0x00000001, nv50_sw_new },
};
@@ -857,7 +823,6 @@ nv63_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -883,7 +848,6 @@ nv67_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -909,7 +873,6 @@ nv68_chipset = {
.fifo = { 0x00000001, nv40_fifo_new },
.gr = { 0x00000001, nv44_gr_new },
.mpeg = { 0x00000001, nv44_mpeg_new },
- .pm = { 0x00000001, nv40_pm_new },
.sw = { 0x00000001, nv10_sw_new },
};
@@ -940,7 +903,6 @@ nv84_chipset = {
.fifo = { 0x00000001, g84_fifo_new },
.gr = { 0x00000001, g84_gr_new },
.mpeg = { 0x00000001, g84_mpeg_new },
- .pm = { 0x00000001, g84_pm_new },
.sw = { 0x00000001, nv50_sw_new },
.vp = { 0x00000001, g84_vp_new },
};
@@ -972,7 +934,6 @@ nv86_chipset = {
.fifo = { 0x00000001, g84_fifo_new },
.gr = { 0x00000001, g84_gr_new },
.mpeg = { 0x00000001, g84_mpeg_new },
- .pm = { 0x00000001, g84_pm_new },
.sw = { 0x00000001, nv50_sw_new },
.vp = { 0x00000001, g84_vp_new },
};
@@ -1004,7 +965,6 @@ nv92_chipset = {
.fifo = { 0x00000001, g84_fifo_new },
.gr = { 0x00000001, g84_gr_new },
.mpeg = { 0x00000001, g84_mpeg_new },
- .pm = { 0x00000001, g84_pm_new },
.sw = { 0x00000001, nv50_sw_new },
.vp = { 0x00000001, g84_vp_new },
};
@@ -1036,7 +996,6 @@ nv94_chipset = {
.fifo = { 0x00000001, g84_fifo_new },
.gr = { 0x00000001, g84_gr_new },
.mpeg = { 0x00000001, g84_mpeg_new },
- .pm = { 0x00000001, g84_pm_new },
.sw = { 0x00000001, nv50_sw_new },
.vp = { 0x00000001, g84_vp_new },
};
@@ -1068,7 +1027,6 @@ nv96_chipset = {
.fifo = { 0x00000001, g84_fifo_new },
.gr = { 0x00000001, g84_gr_new },
.mpeg = { 0x00000001, g84_mpeg_new },
- .pm = { 0x00000001, g84_pm_new },
.sw = { 0x00000001, nv50_sw_new },
.vp = { 0x00000001, g84_vp_new },
};
@@ -1100,7 +1058,6 @@ nv98_chipset = {
.mspdec = { 0x00000001, g98_mspdec_new },
.msppp = { 0x00000001, g98_msppp_new },
.msvld = { 0x00000001, g98_msvld_new },
- .pm = { 0x00000001, g84_pm_new },
.sec = { 0x00000001, g98_sec_new },
.sw = { 0x00000001, nv50_sw_new },
};
@@ -1132,7 +1089,6 @@ nva0_chipset = {
.fifo = { 0x00000001, g84_fifo_new },
.gr = { 0x00000001, gt200_gr_new },
.mpeg = { 0x00000001, g84_mpeg_new },
- .pm = { 0x00000001, gt200_pm_new },
.sw = { 0x00000001, nv50_sw_new },
.vp = { 0x00000001, g84_vp_new },
};
@@ -1167,7 +1123,6 @@ nva3_chipset = {
.mspdec = { 0x00000001, gt215_mspdec_new },
.msppp = { 0x00000001, gt215_msppp_new },
.msvld = { 0x00000001, gt215_msvld_new },
- .pm = { 0x00000001, gt215_pm_new },
.sw = { 0x00000001, nv50_sw_new },
};
@@ -1200,7 +1155,6 @@ nva5_chipset = {
.mspdec = { 0x00000001, gt215_mspdec_new },
.msppp = { 0x00000001, gt215_msppp_new },
.msvld = { 0x00000001, gt215_msvld_new },
- .pm = { 0x00000001, gt215_pm_new },
.sw = { 0x00000001, nv50_sw_new },
};
@@ -1233,7 +1187,6 @@ nva8_chipset = {
.mspdec = { 0x00000001, gt215_mspdec_new },
.msppp = { 0x00000001, gt215_msppp_new },
.msvld = { 0x00000001, gt215_msvld_new },
- .pm = { 0x00000001, gt215_pm_new },
.sw = { 0x00000001, nv50_sw_new },
};
@@ -1264,7 +1217,6 @@ nvaa_chipset = {
.mspdec = { 0x00000001, g98_mspdec_new },
.msppp = { 0x00000001, g98_msppp_new },
.msvld = { 0x00000001, g98_msvld_new },
- .pm = { 0x00000001, g84_pm_new },
.sec = { 0x00000001, g98_sec_new },
.sw = { 0x00000001, nv50_sw_new },
};
@@ -1296,7 +1248,6 @@ nvac_chipset = {
.mspdec = { 0x00000001, g98_mspdec_new },
.msppp = { 0x00000001, g98_msppp_new },
.msvld = { 0x00000001, g98_msvld_new },
- .pm = { 0x00000001, g84_pm_new },
.sec = { 0x00000001, g98_sec_new },
.sw = { 0x00000001, nv50_sw_new },
};
@@ -1330,7 +1281,6 @@ nvaf_chipset = {
.mspdec = { 0x00000001, gt215_mspdec_new },
.msppp = { 0x00000001, gt215_msppp_new },
.msvld = { 0x00000001, mcp89_msvld_new },
- .pm = { 0x00000001, gt215_pm_new },
.sw = { 0x00000001, nv50_sw_new },
};
@@ -1366,7 +1316,6 @@ nvc0_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf100_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1402,7 +1351,6 @@ nvc1_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf108_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1438,7 +1386,6 @@ nvc3_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf100_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1474,7 +1421,6 @@ nvc4_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf100_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1510,7 +1456,6 @@ nvc8_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf100_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1546,7 +1491,6 @@ nvce_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf100_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1582,7 +1526,6 @@ nvcf_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf100_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1617,7 +1560,6 @@ nvd7_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf117_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1653,7 +1595,6 @@ nvd9_chipset = {
.mspdec = { 0x00000001, gf100_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gf100_msvld_new },
- .pm = { 0x00000001, gf117_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1690,7 +1631,6 @@ nve4_chipset = {
.mspdec = { 0x00000001, gk104_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gk104_msvld_new },
- .pm = { 0x00000001, gk104_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1727,7 +1667,6 @@ nve6_chipset = {
.mspdec = { 0x00000001, gk104_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gk104_msvld_new },
- .pm = { 0x00000001, gk104_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1764,7 +1703,6 @@ nve7_chipset = {
.mspdec = { 0x00000001, gk104_mspdec_new },
.msppp = { 0x00000001, gf100_msppp_new },
.msvld = { 0x00000001, gk104_msvld_new },
- .pm = { 0x00000001, gk104_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -1789,7 +1727,6 @@ nvea_chipset = {
.dma = { 0x00000001, gf119_dma_new },
.fifo = { 0x00000001, gk20a_fifo_new },
.gr = { 0x00000001, gk20a_gr_new },
- .pm = { 0x00000001, gk104_pm_new },
.sw = { 0x00000001, gf100_sw_new },
};
@@ -3104,7 +3041,6 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
const struct nvkm_device_quirk *quirk,
struct device *dev, enum nvkm_device_type type, u64 handle,
const char *name, const char *cfg, const char *dbg,
- bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device *device)
{
struct nvkm_subdev *subdev;
@@ -3132,233 +3068,228 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
mmio_base = device->func->resource_addr(device, 0);
mmio_size = device->func->resource_size(device, 0);
- if (detect || mmio) {
- device->pri = ioremap(mmio_base, mmio_size);
- if (device->pri == NULL) {
- nvdev_error(device, "unable to map PRI\n");
- ret = -ENOMEM;
- goto done;
- }
+ device->pri = ioremap(mmio_base, mmio_size);
+ if (device->pri == NULL) {
+ nvdev_error(device, "unable to map PRI\n");
+ ret = -ENOMEM;
+ goto done;
}
/* identify the chipset, and determine classes of subdev/engines */
- if (detect) {
- /* switch mmio to cpu's native endianness */
- if (!nvkm_device_endianness(device)) {
- nvdev_error(device,
- "Couldn't switch GPU to CPUs endianness\n");
- ret = -ENOSYS;
- goto done;
- }
- boot0 = nvkm_rd32(device, 0x000000);
-
- /* chipset can be overridden for devel/testing purposes */
- chipset = nvkm_longopt(device->cfgopt, "NvChipset", 0);
- if (chipset) {
- u32 override_boot0;
-
- if (chipset >= 0x10) {
- override_boot0 = ((chipset & 0x1ff) << 20);
- override_boot0 |= 0x000000a1;
- } else {
- if (chipset != 0x04)
- override_boot0 = 0x20104000;
- else
- override_boot0 = 0x20004000;
- }
+ /* switch mmio to cpu's native endianness */
+ if (!nvkm_device_endianness(device)) {
+ nvdev_error(device,
+ "Couldn't switch GPU to CPUs endianness\n");
+ ret = -ENOSYS;
+ goto done;
+ }
- nvdev_warn(device, "CHIPSET OVERRIDE: %08x -> %08x\n",
- boot0, override_boot0);
- boot0 = override_boot0;
- }
+ boot0 = nvkm_rd32(device, 0x000000);
- /* determine chipset and derive architecture from it */
- if ((boot0 & 0x1f000000) > 0) {
- device->chipset = (boot0 & 0x1ff00000) >> 20;
- device->chiprev = (boot0 & 0x000000ff);
- switch (device->chipset & 0x1f0) {
- case 0x010: {
- if (0x461 & (1 << (device->chipset & 0xf)))
- device->card_type = NV_10;
- else
- device->card_type = NV_11;
- device->chiprev = 0x00;
- break;
- }
- case 0x020: device->card_type = NV_20; break;
- case 0x030: device->card_type = NV_30; break;
- case 0x040:
- case 0x060: device->card_type = NV_40; break;
- case 0x050:
- case 0x080:
- case 0x090:
- case 0x0a0: device->card_type = NV_50; break;
- case 0x0c0:
- case 0x0d0: device->card_type = NV_C0; break;
- case 0x0e0:
- case 0x0f0:
- case 0x100: device->card_type = NV_E0; break;
- case 0x110:
- case 0x120: device->card_type = GM100; break;
- case 0x130: device->card_type = GP100; break;
- case 0x140: device->card_type = GV100; break;
- case 0x160: device->card_type = TU100; break;
- case 0x170: device->card_type = GA100; break;
- case 0x190: device->card_type = AD100; break;
- default:
- break;
- }
- } else
- if ((boot0 & 0xff00fff0) == 0x20004000) {
- if (boot0 & 0x00f00000)
- device->chipset = 0x05;
+ /* chipset can be overridden for devel/testing purposes */
+ chipset = nvkm_longopt(device->cfgopt, "NvChipset", 0);
+ if (chipset) {
+ u32 override_boot0;
+
+ if (chipset >= 0x10) {
+ override_boot0 = ((chipset & 0x1ff) << 20);
+ override_boot0 |= 0x000000a1;
+ } else {
+ if (chipset != 0x04)
+ override_boot0 = 0x20104000;
else
- device->chipset = 0x04;
- device->card_type = NV_04;
+ override_boot0 = 0x20004000;
}
- switch (device->chipset) {
- case 0x004: device->chip = &nv4_chipset; break;
- case 0x005: device->chip = &nv5_chipset; break;
- case 0x010: device->chip = &nv10_chipset; break;
- case 0x011: device->chip = &nv11_chipset; break;
- case 0x015: device->chip = &nv15_chipset; break;
- case 0x017: device->chip = &nv17_chipset; break;
- case 0x018: device->chip = &nv18_chipset; break;
- case 0x01a: device->chip = &nv1a_chipset; break;
- case 0x01f: device->chip = &nv1f_chipset; break;
- case 0x020: device->chip = &nv20_chipset; break;
- case 0x025: device->chip = &nv25_chipset; break;
- case 0x028: device->chip = &nv28_chipset; break;
- case 0x02a: device->chip = &nv2a_chipset; break;
- case 0x030: device->chip = &nv30_chipset; break;
- case 0x031: device->chip = &nv31_chipset; break;
- case 0x034: device->chip = &nv34_chipset; break;
- case 0x035: device->chip = &nv35_chipset; break;
- case 0x036: device->chip = &nv36_chipset; break;
- case 0x040: device->chip = &nv40_chipset; break;
- case 0x041: device->chip = &nv41_chipset; break;
- case 0x042: device->chip = &nv42_chipset; break;
- case 0x043: device->chip = &nv43_chipset; break;
- case 0x044: device->chip = &nv44_chipset; break;
- case 0x045: device->chip = &nv45_chipset; break;
- case 0x046: device->chip = &nv46_chipset; break;
- case 0x047: device->chip = &nv47_chipset; break;
- case 0x049: device->chip = &nv49_chipset; break;
- case 0x04a: device->chip = &nv4a_chipset; break;
- case 0x04b: device->chip = &nv4b_chipset; break;
- case 0x04c: device->chip = &nv4c_chipset; break;
- case 0x04e: device->chip = &nv4e_chipset; break;
- case 0x050: device->chip = &nv50_chipset; break;
- case 0x063: device->chip = &nv63_chipset; break;
- case 0x067: device->chip = &nv67_chipset; break;
- case 0x068: device->chip = &nv68_chipset; break;
- case 0x084: device->chip = &nv84_chipset; break;
- case 0x086: device->chip = &nv86_chipset; break;
- case 0x092: device->chip = &nv92_chipset; break;
- case 0x094: device->chip = &nv94_chipset; break;
- case 0x096: device->chip = &nv96_chipset; break;
- case 0x098: device->chip = &nv98_chipset; break;
- case 0x0a0: device->chip = &nva0_chipset; break;
- case 0x0a3: device->chip = &nva3_chipset; break;
- case 0x0a5: device->chip = &nva5_chipset; break;
- case 0x0a8: device->chip = &nva8_chipset; break;
- case 0x0aa: device->chip = &nvaa_chipset; break;
- case 0x0ac: device->chip = &nvac_chipset; break;
- case 0x0af: device->chip = &nvaf_chipset; break;
- case 0x0c0: device->chip = &nvc0_chipset; break;
- case 0x0c1: device->chip = &nvc1_chipset; break;
- case 0x0c3: device->chip = &nvc3_chipset; break;
- case 0x0c4: device->chip = &nvc4_chipset; break;
- case 0x0c8: device->chip = &nvc8_chipset; break;
- case 0x0ce: device->chip = &nvce_chipset; break;
- case 0x0cf: device->chip = &nvcf_chipset; break;
- case 0x0d7: device->chip = &nvd7_chipset; break;
- case 0x0d9: device->chip = &nvd9_chipset; break;
- case 0x0e4: device->chip = &nve4_chipset; break;
- case 0x0e6: device->chip = &nve6_chipset; break;
- case 0x0e7: device->chip = &nve7_chipset; break;
- case 0x0ea: device->chip = &nvea_chipset; break;
- case 0x0f0: device->chip = &nvf0_chipset; break;
- case 0x0f1: device->chip = &nvf1_chipset; break;
- case 0x106: device->chip = &nv106_chipset; break;
- case 0x108: device->chip = &nv108_chipset; break;
- case 0x117: device->chip = &nv117_chipset; break;
- case 0x118: device->chip = &nv118_chipset; break;
- case 0x120: device->chip = &nv120_chipset; break;
- case 0x124: device->chip = &nv124_chipset; break;
- case 0x126: device->chip = &nv126_chipset; break;
- case 0x12b: device->chip = &nv12b_chipset; break;
- case 0x130: device->chip = &nv130_chipset; break;
- case 0x132: device->chip = &nv132_chipset; break;
- case 0x134: device->chip = &nv134_chipset; break;
- case 0x136: device->chip = &nv136_chipset; break;
- case 0x137: device->chip = &nv137_chipset; break;
- case 0x138: device->chip = &nv138_chipset; break;
- case 0x13b: device->chip = &nv13b_chipset; break;
- case 0x140: device->chip = &nv140_chipset; break;
- case 0x162: device->chip = &nv162_chipset; break;
- case 0x164: device->chip = &nv164_chipset; break;
- case 0x166: device->chip = &nv166_chipset; break;
- case 0x167: device->chip = &nv167_chipset; break;
- case 0x168: device->chip = &nv168_chipset; break;
- case 0x172: device->chip = &nv172_chipset; break;
- case 0x173: device->chip = &nv173_chipset; break;
- case 0x174: device->chip = &nv174_chipset; break;
- case 0x176: device->chip = &nv176_chipset; break;
- case 0x177: device->chip = &nv177_chipset; break;
- case 0x192: device->chip = &nv192_chipset; break;
- case 0x193: device->chip = &nv193_chipset; break;
- case 0x194: device->chip = &nv194_chipset; break;
- case 0x196: device->chip = &nv196_chipset; break;
- case 0x197: device->chip = &nv197_chipset; break;
- default:
- if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
- switch (device->chipset) {
- case 0x170: device->chip = &nv170_chipset; break;
- default:
- break;
- }
- }
+ nvdev_warn(device, "CHIPSET OVERRIDE: %08x -> %08x\n",
+ boot0, override_boot0);
+ boot0 = override_boot0;
+ }
- if (!device->chip) {
- nvdev_error(device, "unknown chipset (%08x)\n", boot0);
- ret = -ENODEV;
- goto done;
- }
+ /* determine chipset and derive architecture from it */
+ if ((boot0 & 0x1f000000) > 0) {
+ device->chipset = (boot0 & 0x1ff00000) >> 20;
+ device->chiprev = (boot0 & 0x000000ff);
+ switch (device->chipset & 0x1f0) {
+ case 0x010: {
+ if (0x461 & (1 << (device->chipset & 0xf)))
+ device->card_type = NV_10;
+ else
+ device->card_type = NV_11;
+ device->chiprev = 0x00;
+ break;
+ }
+ case 0x020: device->card_type = NV_20; break;
+ case 0x030: device->card_type = NV_30; break;
+ case 0x040:
+ case 0x060: device->card_type = NV_40; break;
+ case 0x050:
+ case 0x080:
+ case 0x090:
+ case 0x0a0: device->card_type = NV_50; break;
+ case 0x0c0:
+ case 0x0d0: device->card_type = NV_C0; break;
+ case 0x0e0:
+ case 0x0f0:
+ case 0x100: device->card_type = NV_E0; break;
+ case 0x110:
+ case 0x120: device->card_type = GM100; break;
+ case 0x130: device->card_type = GP100; break;
+ case 0x140: device->card_type = GV100; break;
+ case 0x160: device->card_type = TU100; break;
+ case 0x170: device->card_type = GA100; break;
+ case 0x190: device->card_type = AD100; break;
+ default:
break;
}
+ } else
+ if ((boot0 & 0xff00fff0) == 0x20004000) {
+ if (boot0 & 0x00f00000)
+ device->chipset = 0x05;
+ else
+ device->chipset = 0x04;
+ device->card_type = NV_04;
+ }
- nvdev_info(device, "NVIDIA %s (%08x)\n",
- device->chip->name, boot0);
+ switch (device->chipset) {
+ case 0x004: device->chip = &nv4_chipset; break;
+ case 0x005: device->chip = &nv5_chipset; break;
+ case 0x010: device->chip = &nv10_chipset; break;
+ case 0x011: device->chip = &nv11_chipset; break;
+ case 0x015: device->chip = &nv15_chipset; break;
+ case 0x017: device->chip = &nv17_chipset; break;
+ case 0x018: device->chip = &nv18_chipset; break;
+ case 0x01a: device->chip = &nv1a_chipset; break;
+ case 0x01f: device->chip = &nv1f_chipset; break;
+ case 0x020: device->chip = &nv20_chipset; break;
+ case 0x025: device->chip = &nv25_chipset; break;
+ case 0x028: device->chip = &nv28_chipset; break;
+ case 0x02a: device->chip = &nv2a_chipset; break;
+ case 0x030: device->chip = &nv30_chipset; break;
+ case 0x031: device->chip = &nv31_chipset; break;
+ case 0x034: device->chip = &nv34_chipset; break;
+ case 0x035: device->chip = &nv35_chipset; break;
+ case 0x036: device->chip = &nv36_chipset; break;
+ case 0x040: device->chip = &nv40_chipset; break;
+ case 0x041: device->chip = &nv41_chipset; break;
+ case 0x042: device->chip = &nv42_chipset; break;
+ case 0x043: device->chip = &nv43_chipset; break;
+ case 0x044: device->chip = &nv44_chipset; break;
+ case 0x045: device->chip = &nv45_chipset; break;
+ case 0x046: device->chip = &nv46_chipset; break;
+ case 0x047: device->chip = &nv47_chipset; break;
+ case 0x049: device->chip = &nv49_chipset; break;
+ case 0x04a: device->chip = &nv4a_chipset; break;
+ case 0x04b: device->chip = &nv4b_chipset; break;
+ case 0x04c: device->chip = &nv4c_chipset; break;
+ case 0x04e: device->chip = &nv4e_chipset; break;
+ case 0x050: device->chip = &nv50_chipset; break;
+ case 0x063: device->chip = &nv63_chipset; break;
+ case 0x067: device->chip = &nv67_chipset; break;
+ case 0x068: device->chip = &nv68_chipset; break;
+ case 0x084: device->chip = &nv84_chipset; break;
+ case 0x086: device->chip = &nv86_chipset; break;
+ case 0x092: device->chip = &nv92_chipset; break;
+ case 0x094: device->chip = &nv94_chipset; break;
+ case 0x096: device->chip = &nv96_chipset; break;
+ case 0x098: device->chip = &nv98_chipset; break;
+ case 0x0a0: device->chip = &nva0_chipset; break;
+ case 0x0a3: device->chip = &nva3_chipset; break;
+ case 0x0a5: device->chip = &nva5_chipset; break;
+ case 0x0a8: device->chip = &nva8_chipset; break;
+ case 0x0aa: device->chip = &nvaa_chipset; break;
+ case 0x0ac: device->chip = &nvac_chipset; break;
+ case 0x0af: device->chip = &nvaf_chipset; break;
+ case 0x0c0: device->chip = &nvc0_chipset; break;
+ case 0x0c1: device->chip = &nvc1_chipset; break;
+ case 0x0c3: device->chip = &nvc3_chipset; break;
+ case 0x0c4: device->chip = &nvc4_chipset; break;
+ case 0x0c8: device->chip = &nvc8_chipset; break;
+ case 0x0ce: device->chip = &nvce_chipset; break;
+ case 0x0cf: device->chip = &nvcf_chipset; break;
+ case 0x0d7: device->chip = &nvd7_chipset; break;
+ case 0x0d9: device->chip = &nvd9_chipset; break;
+ case 0x0e4: device->chip = &nve4_chipset; break;
+ case 0x0e6: device->chip = &nve6_chipset; break;
+ case 0x0e7: device->chip = &nve7_chipset; break;
+ case 0x0ea: device->chip = &nvea_chipset; break;
+ case 0x0f0: device->chip = &nvf0_chipset; break;
+ case 0x0f1: device->chip = &nvf1_chipset; break;
+ case 0x106: device->chip = &nv106_chipset; break;
+ case 0x108: device->chip = &nv108_chipset; break;
+ case 0x117: device->chip = &nv117_chipset; break;
+ case 0x118: device->chip = &nv118_chipset; break;
+ case 0x120: device->chip = &nv120_chipset; break;
+ case 0x124: device->chip = &nv124_chipset; break;
+ case 0x126: device->chip = &nv126_chipset; break;
+ case 0x12b: device->chip = &nv12b_chipset; break;
+ case 0x130: device->chip = &nv130_chipset; break;
+ case 0x132: device->chip = &nv132_chipset; break;
+ case 0x134: device->chip = &nv134_chipset; break;
+ case 0x136: device->chip = &nv136_chipset; break;
+ case 0x137: device->chip = &nv137_chipset; break;
+ case 0x138: device->chip = &nv138_chipset; break;
+ case 0x13b: device->chip = &nv13b_chipset; break;
+ case 0x140: device->chip = &nv140_chipset; break;
+ case 0x162: device->chip = &nv162_chipset; break;
+ case 0x164: device->chip = &nv164_chipset; break;
+ case 0x166: device->chip = &nv166_chipset; break;
+ case 0x167: device->chip = &nv167_chipset; break;
+ case 0x168: device->chip = &nv168_chipset; break;
+ case 0x172: device->chip = &nv172_chipset; break;
+ case 0x173: device->chip = &nv173_chipset; break;
+ case 0x174: device->chip = &nv174_chipset; break;
+ case 0x176: device->chip = &nv176_chipset; break;
+ case 0x177: device->chip = &nv177_chipset; break;
+ case 0x192: device->chip = &nv192_chipset; break;
+ case 0x193: device->chip = &nv193_chipset; break;
+ case 0x194: device->chip = &nv194_chipset; break;
+ case 0x196: device->chip = &nv196_chipset; break;
+ case 0x197: device->chip = &nv197_chipset; break;
+ default:
+ if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
+ switch (device->chipset) {
+ case 0x170: device->chip = &nv170_chipset; break;
+ default:
+ break;
+ }
+ }
- /* vGPU detection */
- boot1 = nvkm_rd32(device, 0x0000004);
- if (device->card_type >= TU100 && (boot1 & 0x00030000)) {
- nvdev_info(device, "vGPUs are not supported\n");
+ if (!device->chip) {
+ nvdev_error(device, "unknown chipset (%08x)\n", boot0);
ret = -ENODEV;
goto done;
}
+ break;
+ }
- /* read strapping information */
- strap = nvkm_rd32(device, 0x101000);
+ nvdev_info(device, "NVIDIA %s (%08x)\n",
+ device->chip->name, boot0);
- /* determine frequency of timing crystal */
- if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
- (device->chipset >= 0x20 && device->chipset < 0x25))
- strap &= 0x00000040;
- else
- strap &= 0x00400040;
+ /* vGPU detection */
+ boot1 = nvkm_rd32(device, 0x0000004);
+ if (device->card_type >= TU100 && (boot1 & 0x00030000)) {
+ nvdev_info(device, "vGPUs are not supported\n");
+ ret = -ENODEV;
+ goto done;
+ }
- switch (strap) {
- case 0x00000000: device->crystal = 13500; break;
- case 0x00000040: device->crystal = 14318; break;
- case 0x00400000: device->crystal = 27000; break;
- case 0x00400040: device->crystal = 25000; break;
- }
- } else {
- device->chip = &null_chipset;
+ /* read strapping information */
+ strap = nvkm_rd32(device, 0x101000);
+
+ /* determine frequency of timing crystal */
+ if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
+ (device->chipset >= 0x20 && device->chipset < 0x25))
+ strap &= 0x00000040;
+ else
+ strap &= 0x00400040;
+
+ switch (strap) {
+ case 0x00000000: device->crystal = 13500; break;
+ case 0x00000040: device->crystal = 14318; break;
+ case 0x00400000: device->crystal = 27000; break;
+ case 0x00400040: device->crystal = 25000; break;
}
if (!device->name)
@@ -3368,7 +3299,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
nvkm_intr_ctor(device);
#define NVKM_LAYOUT_ONCE(type,data,ptr) \
- if (device->chip->ptr.inst && (subdev_mask & (BIT_ULL(type)))) { \
+ if (device->chip->ptr.inst) { \
WARN_ON(device->chip->ptr.inst != 0x00000001); \
ret = device->chip->ptr.ctor(device, (type), -1, &device->ptr); \
subdev = nvkm_device_subdev(device, (type), 0); \
@@ -3387,7 +3318,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
#define NVKM_LAYOUT_INST(type,data,ptr,cnt) \
WARN_ON(device->chip->ptr.inst & ~((1 << ARRAY_SIZE(device->ptr)) - 1)); \
for (j = 0; device->chip->ptr.inst && j < ARRAY_SIZE(device->ptr); j++) { \
- if ((device->chip->ptr.inst & BIT(j)) && (subdev_mask & BIT_ULL(type))) { \
+ if (device->chip->ptr.inst & BIT(j)) { \
ret = device->chip->ptr.ctor(device, (type), (j), &device->ptr[j]); \
subdev = nvkm_device_subdev(device, (type), (j)); \
if (ret) { \
@@ -3409,7 +3340,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
ret = nvkm_intr_install(device);
done:
- if (device->pri && (!mmio || ret)) {
+ if (ret && device->pri) {
iounmap(device->pri);
device->pri = NULL;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index abccb2bb68a6..3ff6436007fa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1626,7 +1626,6 @@ nvkm_device_pci_func = {
int
nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
- bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device **pdevice)
{
const struct nvkm_device_quirk *quirk = NULL;
@@ -1680,8 +1679,7 @@ nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
pci_dev->bus->number << 16 |
PCI_SLOT(pci_dev->devfn) << 8 |
PCI_FUNC(pci_dev->devfn), name,
- cfg, dbg, detect, mmio, subdev_mask,
- &pdev->device);
+ cfg, dbg, &pdev->device);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index bf3176bec18a..e42b18820a95 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -45,7 +45,6 @@
#include <engine/nvdec.h>
#include <engine/nvjpg.h>
#include <engine/ofa.h>
-#include <engine/pm.h>
#include <engine/sec.h>
#include <engine/sec2.h>
#include <engine/sw.h>
@@ -56,7 +55,6 @@ int nvkm_device_ctor(const struct nvkm_device_func *,
const struct nvkm_device_quirk *,
struct device *, enum nvkm_device_type, u64 handle,
const char *name, const char *cfg, const char *dbg,
- bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device *);
int nvkm_device_init(struct nvkm_device *);
int nvkm_device_fini(struct nvkm_device *, bool suspend);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 87caa4a72921..d1c294f00665 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -237,7 +237,6 @@ int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
struct platform_device *pdev,
const char *cfg, const char *dbg,
- bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device **pdevice)
{
struct nvkm_device_tegra *tdev;
@@ -311,8 +310,7 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;
ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
NVKM_DEVICE_TEGRA, pdev->id, NULL,
- cfg, dbg, detect, mmio, subdev_mask,
- &tdev->device);
+ cfg, dbg, &tdev->device);
if (ret)
goto powerdown;
@@ -333,7 +331,6 @@ int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
struct platform_device *pdev,
const char *cfg, const char *dbg,
- bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device **pdevice)
{
return -ENOSYS;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 7fd4800a876a..d7f75b3a43c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -203,54 +203,6 @@ nvkm_udevice_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
}
static int
-nvkm_udevice_rd08(struct nvkm_object *object, u64 addr, u8 *data)
-{
- struct nvkm_udevice *udev = nvkm_udevice(object);
- *data = nvkm_rd08(udev->device, addr);
- return 0;
-}
-
-static int
-nvkm_udevice_rd16(struct nvkm_object *object, u64 addr, u16 *data)
-{
- struct nvkm_udevice *udev = nvkm_udevice(object);
- *data = nvkm_rd16(udev->device, addr);
- return 0;
-}
-
-static int
-nvkm_udevice_rd32(struct nvkm_object *object, u64 addr, u32 *data)
-{
- struct nvkm_udevice *udev = nvkm_udevice(object);
- *data = nvkm_rd32(udev->device, addr);
- return 0;
-}
-
-static int
-nvkm_udevice_wr08(struct nvkm_object *object, u64 addr, u8 data)
-{
- struct nvkm_udevice *udev = nvkm_udevice(object);
- nvkm_wr08(udev->device, addr, data);
- return 0;
-}
-
-static int
-nvkm_udevice_wr16(struct nvkm_object *object, u64 addr, u16 data)
-{
- struct nvkm_udevice *udev = nvkm_udevice(object);
- nvkm_wr16(udev->device, addr, data);
- return 0;
-}
-
-static int
-nvkm_udevice_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
- struct nvkm_udevice *udev = nvkm_udevice(object);
- nvkm_wr32(udev->device, addr, data);
- return 0;
-}
-
-static int
nvkm_udevice_map(struct nvkm_object *object, void *argv, u32 argc,
enum nvkm_object_map *type, u64 *addr, u64 *size)
{
@@ -322,8 +274,7 @@ nvkm_udevice_child_get(struct nvkm_object *object, int index,
struct nvkm_engine *engine;
u64 mask = (1ULL << NVKM_ENGINE_DMAOBJ) |
(1ULL << NVKM_ENGINE_FIFO) |
- (1ULL << NVKM_ENGINE_DISP) |
- (1ULL << NVKM_ENGINE_PM);
+ (1ULL << NVKM_ENGINE_DISP);
const struct nvkm_device_oclass *sclass = NULL;
int i;
@@ -358,25 +309,11 @@ nvkm_udevice_child_get(struct nvkm_object *object, int index,
}
static const struct nvkm_object_func
-nvkm_udevice_super = {
- .init = nvkm_udevice_init,
- .fini = nvkm_udevice_fini,
- .mthd = nvkm_udevice_mthd,
- .map = nvkm_udevice_map,
- .rd08 = nvkm_udevice_rd08,
- .rd16 = nvkm_udevice_rd16,
- .rd32 = nvkm_udevice_rd32,
- .wr08 = nvkm_udevice_wr08,
- .wr16 = nvkm_udevice_wr16,
- .wr32 = nvkm_udevice_wr32,
- .sclass = nvkm_udevice_child_get,
-};
-
-static const struct nvkm_object_func
nvkm_udevice = {
.init = nvkm_udevice_init,
.fini = nvkm_udevice_fini,
.mthd = nvkm_udevice_mthd,
+ .map = nvkm_udevice_map,
.sclass = nvkm_udevice_child_get,
};
@@ -384,38 +321,16 @@ static int
nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
- union {
- struct nv_device_v0 v0;
- } *args = data;
struct nvkm_client *client = oclass->client;
- struct nvkm_object *parent = &client->object;
- const struct nvkm_object_func *func;
struct nvkm_udevice *udev;
- int ret = -ENOSYS;
-
- nvif_ioctl(parent, "create device size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(parent, "create device v%d device %016llx\n",
- args->v0.version, args->v0.device);
- } else
- return ret;
-
- /* give priviledged clients register access */
- if (args->v0.priv)
- func = &nvkm_udevice_super;
- else
- func = &nvkm_udevice;
if (!(udev = kzalloc(sizeof(*udev), GFP_KERNEL)))
return -ENOMEM;
- nvkm_object_ctor(func, oclass, &udev->object);
+ nvkm_object_ctor(&nvkm_udevice, oclass, &udev->object);
*pobject = &udev->object;
/* find the device that matches what the client requested */
- if (args->v0.device != ~0)
- udev->device = nvkm_device_find(args->v0.device);
- else
- udev->device = nvkm_device_find(client->device);
+ udev->device = nvkm_device_find(client->device);
if (!udev->device)
return -ENODEV;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
index d5e18daed79f..4e43ee383c34 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
@@ -27,28 +27,6 @@
#include <nvif/if0014.h>
static int
-nvkm_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
-{
- struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
- struct nvkm_device *device = chan->disp->engine.subdev.device;
- u64 size, base = chan->func->user(chan, &size);
-
- *data = nvkm_rd32(device, base + addr);
- return 0;
-}
-
-static int
-nvkm_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
- struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
- struct nvkm_device *device = chan->disp->engine.subdev.device;
- u64 size, base = chan->func->user(chan, &size);
-
- nvkm_wr32(device, base + addr, data);
- return 0;
-}
-
-static int
nvkm_disp_chan_ntfy(struct nvkm_object *object, u32 type, struct nvkm_event **pevent)
{
struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
@@ -188,8 +166,6 @@ nvkm_disp_chan = {
.dtor = nvkm_disp_chan_dtor,
.init = nvkm_disp_chan_init,
.fini = nvkm_disp_chan_fini,
- .rd32 = nvkm_disp_chan_rd32,
- .wr32 = nvkm_disp_chan_wr32,
.ntfy = nvkm_disp_chan_ntfy,
.map = nvkm_disp_chan_map,
.sclass = nvkm_disp_chan_child_get,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
deleted file mode 100644
index 2cc8a5f6fe0c..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: MIT
-nvkm-y += nvkm/engine/pm/base.o
-nvkm-y += nvkm/engine/pm/nv40.o
-nvkm-y += nvkm/engine/pm/nv50.o
-nvkm-y += nvkm/engine/pm/g84.o
-nvkm-y += nvkm/engine/pm/gt200.o
-nvkm-y += nvkm/engine/pm/gt215.o
-nvkm-y += nvkm/engine/pm/gf100.o
-nvkm-y += nvkm/engine/pm/gf108.o
-nvkm-y += nvkm/engine/pm/gf117.o
-nvkm-y += nvkm/engine/pm/gk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
deleted file mode 100644
index 131db2645f84..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ /dev/null
@@ -1,867 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <core/client.h>
-#include <core/option.h>
-
-#include <nvif/class.h>
-#include <nvif/if0002.h>
-#include <nvif/if0003.h>
-#include <nvif/ioctl.h>
-#include <nvif/unpack.h>
-
-static u8
-nvkm_pm_count_perfdom(struct nvkm_pm *pm)
-{
- struct nvkm_perfdom *dom;
- u8 domain_nr = 0;
-
- list_for_each_entry(dom, &pm->domains, head)
- domain_nr++;
- return domain_nr;
-}
-
-static u16
-nvkm_perfdom_count_perfsig(struct nvkm_perfdom *dom)
-{
- u16 signal_nr = 0;
- int i;
-
- if (dom) {
- for (i = 0; i < dom->signal_nr; i++) {
- if (dom->signal[i].name)
- signal_nr++;
- }
- }
- return signal_nr;
-}
-
-static struct nvkm_perfdom *
-nvkm_perfdom_find(struct nvkm_pm *pm, int di)
-{
- struct nvkm_perfdom *dom;
- int tmp = 0;
-
- list_for_each_entry(dom, &pm->domains, head) {
- if (tmp++ == di)
- return dom;
- }
- return NULL;
-}
-
-static struct nvkm_perfsig *
-nvkm_perfsig_find(struct nvkm_pm *pm, u8 di, u8 si, struct nvkm_perfdom **pdom)
-{
- struct nvkm_perfdom *dom = *pdom;
-
- if (dom == NULL) {
- dom = nvkm_perfdom_find(pm, di);
- if (dom == NULL)
- return NULL;
- *pdom = dom;
- }
-
- if (!dom->signal[si].name)
- return NULL;
- return &dom->signal[si];
-}
-
-static u8
-nvkm_perfsig_count_perfsrc(struct nvkm_perfsig *sig)
-{
- u8 source_nr = 0, i;
-
- for (i = 0; i < ARRAY_SIZE(sig->source); i++) {
- if (sig->source[i])
- source_nr++;
- }
- return source_nr;
-}
-
-static struct nvkm_perfsrc *
-nvkm_perfsrc_find(struct nvkm_pm *pm, struct nvkm_perfsig *sig, int si)
-{
- struct nvkm_perfsrc *src;
- bool found = false;
- int tmp = 1; /* Sources ID start from 1 */
- u8 i;
-
- for (i = 0; i < ARRAY_SIZE(sig->source) && sig->source[i]; i++) {
- if (sig->source[i] == si) {
- found = true;
- break;
- }
- }
-
- if (found) {
- list_for_each_entry(src, &pm->sources, head) {
- if (tmp++ == si)
- return src;
- }
- }
-
- return NULL;
-}
-
-static int
-nvkm_perfsrc_enable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
-{
- struct nvkm_subdev *subdev = &pm->engine.subdev;
- struct nvkm_device *device = subdev->device;
- struct nvkm_perfdom *dom = NULL;
- struct nvkm_perfsig *sig;
- struct nvkm_perfsrc *src;
- u32 mask, value;
- int i, j;
-
- for (i = 0; i < 4; i++) {
- for (j = 0; j < 8 && ctr->source[i][j]; j++) {
- sig = nvkm_perfsig_find(pm, ctr->domain,
- ctr->signal[i], &dom);
- if (!sig)
- return -EINVAL;
-
- src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
- if (!src)
- return -EINVAL;
-
- /* set enable bit if needed */
- mask = value = 0x00000000;
- if (src->enable)
- mask = value = 0x80000000;
- mask |= (src->mask << src->shift);
- value |= ((ctr->source[i][j] >> 32) << src->shift);
-
- /* enable the source */
- nvkm_mask(device, src->addr, mask, value);
- nvkm_debug(subdev,
- "enabled source %08x %08x %08x\n",
- src->addr, mask, value);
- }
- }
- return 0;
-}
-
-static int
-nvkm_perfsrc_disable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
-{
- struct nvkm_subdev *subdev = &pm->engine.subdev;
- struct nvkm_device *device = subdev->device;
- struct nvkm_perfdom *dom = NULL;
- struct nvkm_perfsig *sig;
- struct nvkm_perfsrc *src;
- u32 mask;
- int i, j;
-
- for (i = 0; i < 4; i++) {
- for (j = 0; j < 8 && ctr->source[i][j]; j++) {
- sig = nvkm_perfsig_find(pm, ctr->domain,
- ctr->signal[i], &dom);
- if (!sig)
- return -EINVAL;
-
- src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
- if (!src)
- return -EINVAL;
-
- /* unset enable bit if needed */
- mask = 0x00000000;
- if (src->enable)
- mask = 0x80000000;
- mask |= (src->mask << src->shift);
-
- /* disable the source */
- nvkm_mask(device, src->addr, mask, 0);
- nvkm_debug(subdev, "disabled source %08x %08x\n",
- src->addr, mask);
- }
- }
- return 0;
-}
-
-/*******************************************************************************
- * Perfdom object classes
- ******************************************************************************/
-static int
-nvkm_perfdom_init(struct nvkm_perfdom *dom, void *data, u32 size)
-{
- union {
- struct nvif_perfdom_init none;
- } *args = data;
- struct nvkm_object *object = &dom->object;
- struct nvkm_pm *pm = dom->perfmon->pm;
- int ret = -ENOSYS, i;
-
- nvif_ioctl(object, "perfdom init size %d\n", size);
- if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
- nvif_ioctl(object, "perfdom init\n");
- } else
- return ret;
-
- for (i = 0; i < 4; i++) {
- if (dom->ctr[i]) {
- dom->func->init(pm, dom, dom->ctr[i]);
-
- /* enable sources */
- nvkm_perfsrc_enable(pm, dom->ctr[i]);
- }
- }
-
- /* start next batch of counters for sampling */
- dom->func->next(pm, dom);
- return 0;
-}
-
-static int
-nvkm_perfdom_sample(struct nvkm_perfdom *dom, void *data, u32 size)
-{
- union {
- struct nvif_perfdom_sample none;
- } *args = data;
- struct nvkm_object *object = &dom->object;
- struct nvkm_pm *pm = dom->perfmon->pm;
- int ret = -ENOSYS;
-
- nvif_ioctl(object, "perfdom sample size %d\n", size);
- if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
- nvif_ioctl(object, "perfdom sample\n");
- } else
- return ret;
- pm->sequence++;
-
- /* sample previous batch of counters */
- list_for_each_entry(dom, &pm->domains, head)
- dom->func->next(pm, dom);
-
- return 0;
-}
-
-static int
-nvkm_perfdom_read(struct nvkm_perfdom *dom, void *data, u32 size)
-{
- union {
- struct nvif_perfdom_read_v0 v0;
- } *args = data;
- struct nvkm_object *object = &dom->object;
- struct nvkm_pm *pm = dom->perfmon->pm;
- int ret = -ENOSYS, i;
-
- nvif_ioctl(object, "perfdom read size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(object, "perfdom read vers %d\n", args->v0.version);
- } else
- return ret;
-
- for (i = 0; i < 4; i++) {
- if (dom->ctr[i])
- dom->func->read(pm, dom, dom->ctr[i]);
- }
-
- if (!dom->clk)
- return -EAGAIN;
-
- for (i = 0; i < 4; i++)
- if (dom->ctr[i])
- args->v0.ctr[i] = dom->ctr[i]->ctr;
- args->v0.clk = dom->clk;
- return 0;
-}
-
-static int
-nvkm_perfdom_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
-{
- struct nvkm_perfdom *dom = nvkm_perfdom(object);
- switch (mthd) {
- case NVIF_PERFDOM_V0_INIT:
- return nvkm_perfdom_init(dom, data, size);
- case NVIF_PERFDOM_V0_SAMPLE:
- return nvkm_perfdom_sample(dom, data, size);
- case NVIF_PERFDOM_V0_READ:
- return nvkm_perfdom_read(dom, data, size);
- default:
- break;
- }
- return -EINVAL;
-}
-
-static void *
-nvkm_perfdom_dtor(struct nvkm_object *object)
-{
- struct nvkm_perfdom *dom = nvkm_perfdom(object);
- struct nvkm_pm *pm = dom->perfmon->pm;
- int i;
-
- for (i = 0; i < 4; i++) {
- struct nvkm_perfctr *ctr = dom->ctr[i];
- if (ctr) {
- nvkm_perfsrc_disable(pm, ctr);
- if (ctr->head.next)
- list_del(&ctr->head);
- }
- kfree(ctr);
- }
-
- return dom;
-}
-
-static int
-nvkm_perfctr_new(struct nvkm_perfdom *dom, int slot, u8 domain,
- struct nvkm_perfsig *signal[4], u64 source[4][8],
- u16 logic_op, struct nvkm_perfctr **pctr)
-{
- struct nvkm_perfctr *ctr;
- int i, j;
-
- if (!dom)
- return -EINVAL;
-
- ctr = *pctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
- if (!ctr)
- return -ENOMEM;
-
- ctr->domain = domain;
- ctr->logic_op = logic_op;
- ctr->slot = slot;
- for (i = 0; i < 4; i++) {
- if (signal[i]) {
- ctr->signal[i] = signal[i] - dom->signal;
- for (j = 0; j < 8; j++)
- ctr->source[i][j] = source[i][j];
- }
- }
- list_add_tail(&ctr->head, &dom->list);
-
- return 0;
-}
-
-static const struct nvkm_object_func
-nvkm_perfdom = {
- .dtor = nvkm_perfdom_dtor,
- .mthd = nvkm_perfdom_mthd,
-};
-
-static int
-nvkm_perfdom_new_(struct nvkm_perfmon *perfmon,
- const struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- union {
- struct nvif_perfdom_v0 v0;
- } *args = data;
- struct nvkm_pm *pm = perfmon->pm;
- struct nvkm_object *parent = oclass->parent;
- struct nvkm_perfdom *sdom = NULL;
- struct nvkm_perfctr *ctr[4] = {};
- struct nvkm_perfdom *dom;
- int c, s, m;
- int ret = -ENOSYS;
-
- nvif_ioctl(parent, "create perfdom size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(parent, "create perfdom vers %d dom %d mode %02x\n",
- args->v0.version, args->v0.domain, args->v0.mode);
- } else
- return ret;
-
- for (c = 0; c < ARRAY_SIZE(args->v0.ctr); c++) {
- struct nvkm_perfsig *sig[4] = {};
- u64 src[4][8] = {};
-
- for (s = 0; s < ARRAY_SIZE(args->v0.ctr[c].signal); s++) {
- sig[s] = nvkm_perfsig_find(pm, args->v0.domain,
- args->v0.ctr[c].signal[s],
- &sdom);
- if (args->v0.ctr[c].signal[s] && !sig[s])
- return -EINVAL;
-
- for (m = 0; m < 8; m++) {
- src[s][m] = args->v0.ctr[c].source[s][m];
- if (src[s][m] && !nvkm_perfsrc_find(pm, sig[s],
- src[s][m]))
- return -EINVAL;
- }
- }
-
- ret = nvkm_perfctr_new(sdom, c, args->v0.domain, sig, src,
- args->v0.ctr[c].logic_op, &ctr[c]);
- if (ret)
- return ret;
- }
-
- if (!sdom)
- return -EINVAL;
-
- if (!(dom = kzalloc(sizeof(*dom), GFP_KERNEL)))
- return -ENOMEM;
- nvkm_object_ctor(&nvkm_perfdom, oclass, &dom->object);
- dom->perfmon = perfmon;
- *pobject = &dom->object;
-
- dom->func = sdom->func;
- dom->addr = sdom->addr;
- dom->mode = args->v0.mode;
- for (c = 0; c < ARRAY_SIZE(ctr); c++)
- dom->ctr[c] = ctr[c];
- return 0;
-}
-
-/*******************************************************************************
- * Perfmon object classes
- ******************************************************************************/
-static int
-nvkm_perfmon_mthd_query_domain(struct nvkm_perfmon *perfmon,
- void *data, u32 size)
-{
- union {
- struct nvif_perfmon_query_domain_v0 v0;
- } *args = data;
- struct nvkm_object *object = &perfmon->object;
- struct nvkm_pm *pm = perfmon->pm;
- struct nvkm_perfdom *dom;
- u8 domain_nr;
- int di, ret = -ENOSYS;
-
- nvif_ioctl(object, "perfmon query domain size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(object, "perfmon domain vers %d iter %02x\n",
- args->v0.version, args->v0.iter);
- di = (args->v0.iter & 0xff) - 1;
- } else
- return ret;
-
- domain_nr = nvkm_pm_count_perfdom(pm);
- if (di >= (int)domain_nr)
- return -EINVAL;
-
- if (di >= 0) {
- dom = nvkm_perfdom_find(pm, di);
- if (dom == NULL)
- return -EINVAL;
-
- args->v0.id = di;
- args->v0.signal_nr = nvkm_perfdom_count_perfsig(dom);
- strscpy(args->v0.name, dom->name, sizeof(args->v0.name));
-
- /* Currently only global counters (PCOUNTER) are implemented
- * but this will be different for local counters (MP). */
- args->v0.counter_nr = 4;
- }
-
- if (++di < domain_nr) {
- args->v0.iter = ++di;
- return 0;
- }
-
- args->v0.iter = 0xff;
- return 0;
-}
-
-static int
-nvkm_perfmon_mthd_query_signal(struct nvkm_perfmon *perfmon,
- void *data, u32 size)
-{
- union {
- struct nvif_perfmon_query_signal_v0 v0;
- } *args = data;
- struct nvkm_object *object = &perfmon->object;
- struct nvkm_pm *pm = perfmon->pm;
- struct nvkm_device *device = pm->engine.subdev.device;
- struct nvkm_perfdom *dom;
- struct nvkm_perfsig *sig;
- const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false);
- const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all);
- int ret = -ENOSYS, si;
-
- nvif_ioctl(object, "perfmon query signal size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(object,
- "perfmon query signal vers %d dom %d iter %04x\n",
- args->v0.version, args->v0.domain, args->v0.iter);
- si = (args->v0.iter & 0xffff) - 1;
- } else
- return ret;
-
- dom = nvkm_perfdom_find(pm, args->v0.domain);
- if (dom == NULL || si >= (int)dom->signal_nr)
- return -EINVAL;
-
- if (si >= 0) {
- sig = &dom->signal[si];
- if (raw || !sig->name) {
- snprintf(args->v0.name, sizeof(args->v0.name),
- "/%s/%02x", dom->name, si);
- } else {
- strscpy(args->v0.name, sig->name, sizeof(args->v0.name));
- }
-
- args->v0.signal = si;
- args->v0.source_nr = nvkm_perfsig_count_perfsrc(sig);
- }
-
- while (++si < dom->signal_nr) {
- if (all || dom->signal[si].name) {
- args->v0.iter = ++si;
- return 0;
- }
- }
-
- args->v0.iter = 0xffff;
- return 0;
-}
-
-static int
-nvkm_perfmon_mthd_query_source(struct nvkm_perfmon *perfmon,
- void *data, u32 size)
-{
- union {
- struct nvif_perfmon_query_source_v0 v0;
- } *args = data;
- struct nvkm_object *object = &perfmon->object;
- struct nvkm_pm *pm = perfmon->pm;
- struct nvkm_perfdom *dom = NULL;
- struct nvkm_perfsig *sig;
- struct nvkm_perfsrc *src;
- u8 source_nr = 0;
- int si, ret = -ENOSYS;
-
- nvif_ioctl(object, "perfmon query source size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(object,
- "perfmon source vers %d dom %d sig %02x iter %02x\n",
- args->v0.version, args->v0.domain, args->v0.signal,
- args->v0.iter);
- si = (args->v0.iter & 0xff) - 1;
- } else
- return ret;
-
- sig = nvkm_perfsig_find(pm, args->v0.domain, args->v0.signal, &dom);
- if (!sig)
- return -EINVAL;
-
- source_nr = nvkm_perfsig_count_perfsrc(sig);
- if (si >= (int)source_nr)
- return -EINVAL;
-
- if (si >= 0) {
- src = nvkm_perfsrc_find(pm, sig, sig->source[si]);
- if (!src)
- return -EINVAL;
-
- args->v0.source = sig->source[si];
- args->v0.mask = src->mask;
- strscpy(args->v0.name, src->name, sizeof(args->v0.name));
- }
-
- if (++si < source_nr) {
- args->v0.iter = ++si;
- return 0;
- }
-
- args->v0.iter = 0xff;
- return 0;
-}
-
-static int
-nvkm_perfmon_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
-{
- struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
- switch (mthd) {
- case NVIF_PERFMON_V0_QUERY_DOMAIN:
- return nvkm_perfmon_mthd_query_domain(perfmon, data, size);
- case NVIF_PERFMON_V0_QUERY_SIGNAL:
- return nvkm_perfmon_mthd_query_signal(perfmon, data, size);
- case NVIF_PERFMON_V0_QUERY_SOURCE:
- return nvkm_perfmon_mthd_query_source(perfmon, data, size);
- default:
- break;
- }
- return -EINVAL;
-}
-
-static int
-nvkm_perfmon_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
-{
- struct nvkm_perfmon *perfmon = nvkm_perfmon(oclass->parent);
- return nvkm_perfdom_new_(perfmon, oclass, data, size, pobject);
-}
-
-static int
-nvkm_perfmon_child_get(struct nvkm_object *object, int index,
- struct nvkm_oclass *oclass)
-{
- if (index == 0) {
- oclass->base.oclass = NVIF_CLASS_PERFDOM;
- oclass->base.minver = 0;
- oclass->base.maxver = 0;
- oclass->ctor = nvkm_perfmon_child_new;
- return 0;
- }
- return -EINVAL;
-}
-
-static void *
-nvkm_perfmon_dtor(struct nvkm_object *object)
-{
- struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
- struct nvkm_pm *pm = perfmon->pm;
- spin_lock(&pm->client.lock);
- if (pm->client.object == &perfmon->object)
- pm->client.object = NULL;
- spin_unlock(&pm->client.lock);
- return perfmon;
-}
-
-static const struct nvkm_object_func
-nvkm_perfmon = {
- .dtor = nvkm_perfmon_dtor,
- .mthd = nvkm_perfmon_mthd,
- .sclass = nvkm_perfmon_child_get,
-};
-
-static int
-nvkm_perfmon_new(struct nvkm_pm *pm, const struct nvkm_oclass *oclass,
- void *data, u32 size, struct nvkm_object **pobject)
-{
- struct nvkm_perfmon *perfmon;
-
- if (!(perfmon = kzalloc(sizeof(*perfmon), GFP_KERNEL)))
- return -ENOMEM;
- nvkm_object_ctor(&nvkm_perfmon, oclass, &perfmon->object);
- perfmon->pm = pm;
- *pobject = &perfmon->object;
- return 0;
-}
-
-/*******************************************************************************
- * PPM engine/subdev functions
- ******************************************************************************/
-
-static int
-nvkm_pm_oclass_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
- void *data, u32 size, struct nvkm_object **pobject)
-{
- struct nvkm_pm *pm = nvkm_pm(oclass->engine);
- int ret;
-
- ret = nvkm_perfmon_new(pm, oclass, data, size, pobject);
- if (ret)
- return ret;
-
- spin_lock(&pm->client.lock);
- if (pm->client.object == NULL)
- pm->client.object = *pobject;
- ret = (pm->client.object == *pobject) ? 0 : -EBUSY;
- spin_unlock(&pm->client.lock);
- return ret;
-}
-
-static const struct nvkm_device_oclass
-nvkm_pm_oclass = {
- .base.oclass = NVIF_CLASS_PERFMON,
- .base.minver = -1,
- .base.maxver = -1,
- .ctor = nvkm_pm_oclass_new,
-};
-
-static int
-nvkm_pm_oclass_get(struct nvkm_oclass *oclass, int index,
- const struct nvkm_device_oclass **class)
-{
- if (index == 0) {
- oclass->base = nvkm_pm_oclass.base;
- *class = &nvkm_pm_oclass;
- return index;
- }
- return 1;
-}
-
-static int
-nvkm_perfsrc_new(struct nvkm_pm *pm, struct nvkm_perfsig *sig,
- const struct nvkm_specsrc *spec)
-{
- const struct nvkm_specsrc *ssrc;
- const struct nvkm_specmux *smux;
- struct nvkm_perfsrc *src;
- u8 source_nr = 0;
-
- if (!spec) {
- /* No sources are defined for this signal. */
- return 0;
- }
-
- ssrc = spec;
- while (ssrc->name) {
- smux = ssrc->mux;
- while (smux->name) {
- bool found = false;
- u8 source_id = 0;
- u32 len;
-
- list_for_each_entry(src, &pm->sources, head) {
- if (src->addr == ssrc->addr &&
- src->shift == smux->shift) {
- found = true;
- break;
- }
- source_id++;
- }
-
- if (!found) {
- src = kzalloc(sizeof(*src), GFP_KERNEL);
- if (!src)
- return -ENOMEM;
-
- src->addr = ssrc->addr;
- src->mask = smux->mask;
- src->shift = smux->shift;
- src->enable = smux->enable;
-
- len = strlen(ssrc->name) +
- strlen(smux->name) + 2;
- src->name = kzalloc(len, GFP_KERNEL);
- if (!src->name) {
- kfree(src);
- return -ENOMEM;
- }
- snprintf(src->name, len, "%s_%s", ssrc->name,
- smux->name);
-
- list_add_tail(&src->head, &pm->sources);
- }
-
- sig->source[source_nr++] = source_id + 1;
- smux++;
- }
- ssrc++;
- }
-
- return 0;
-}
-
-int
-nvkm_perfdom_new(struct nvkm_pm *pm, const char *name, u32 mask,
- u32 base, u32 size_unit, u32 size_domain,
- const struct nvkm_specdom *spec)
-{
- const struct nvkm_specdom *sdom;
- const struct nvkm_specsig *ssig;
- struct nvkm_perfdom *dom;
- int ret, i;
-
- for (i = 0; i == 0 || mask; i++) {
- u32 addr = base + (i * size_unit);
- if (i && !(mask & (1 << i)))
- continue;
-
- sdom = spec;
- while (sdom->signal_nr) {
- dom = kzalloc(struct_size(dom, signal, sdom->signal_nr),
- GFP_KERNEL);
- if (!dom)
- return -ENOMEM;
-
- if (mask) {
- snprintf(dom->name, sizeof(dom->name),
- "%s/%02x/%02x", name, i,
- (int)(sdom - spec));
- } else {
- snprintf(dom->name, sizeof(dom->name),
- "%s/%02x", name, (int)(sdom - spec));
- }
-
- list_add_tail(&dom->head, &pm->domains);
- INIT_LIST_HEAD(&dom->list);
- dom->func = sdom->func;
- dom->addr = addr;
- dom->signal_nr = sdom->signal_nr;
-
- ssig = (sdom++)->signal;
- while (ssig->name) {
- struct nvkm_perfsig *sig =
- &dom->signal[ssig->signal];
- sig->name = ssig->name;
- ret = nvkm_perfsrc_new(pm, sig, ssig->source);
- if (ret)
- return ret;
- ssig++;
- }
-
- addr += size_domain;
- }
-
- mask &= ~(1 << i);
- }
-
- return 0;
-}
-
-static int
-nvkm_pm_fini(struct nvkm_engine *engine, bool suspend)
-{
- struct nvkm_pm *pm = nvkm_pm(engine);
- if (pm->func->fini)
- pm->func->fini(pm);
- return 0;
-}
-
-static void *
-nvkm_pm_dtor(struct nvkm_engine *engine)
-{
- struct nvkm_pm *pm = nvkm_pm(engine);
- struct nvkm_perfdom *dom, *next_dom;
- struct nvkm_perfsrc *src, *next_src;
-
- list_for_each_entry_safe(dom, next_dom, &pm->domains, head) {
- list_del(&dom->head);
- kfree(dom);
- }
-
- list_for_each_entry_safe(src, next_src, &pm->sources, head) {
- list_del(&src->head);
- kfree(src->name);
- kfree(src);
- }
-
- return pm;
-}
-
-static const struct nvkm_engine_func
-nvkm_pm = {
- .dtor = nvkm_pm_dtor,
- .fini = nvkm_pm_fini,
- .base.sclass = nvkm_pm_oclass_get,
-};
-
-int
-nvkm_pm_ctor(const struct nvkm_pm_func *func, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_pm *pm)
-{
- pm->func = func;
- INIT_LIST_HEAD(&pm->domains);
- INIT_LIST_HEAD(&pm->sources);
- spin_lock_init(&pm->client.lock);
- return nvkm_engine_ctor(&nvkm_pm, device, type, inst, true, &pm->engine);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
deleted file mode 100644
index 0086d00eb162..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv40.h"
-
-const struct nvkm_specsrc
-g84_vfetch_sources[] = {
- { 0x400c0c, (const struct nvkm_specmux[]) {
- { 0x3, 0, "unk0" },
- {}
- }, "pgraph_vfetch_unk0c" },
- {}
-};
-
-static const struct nvkm_specsrc
-g84_prop_sources[] = {
- { 0x408e50, (const struct nvkm_specmux[]) {
- { 0x1f, 0, "sel", true },
- {}
- }, "pgraph_tpc0_prop_pm_mux" },
- {}
-};
-
-static const struct nvkm_specsrc
-g84_crop_sources[] = {
- { 0x407008, (const struct nvkm_specmux[]) {
- { 0xf, 0, "sel0", true },
- { 0x7, 16, "sel1", true },
- {}
- }, "pgraph_rop0_crop_pm_mux" },
- {}
-};
-
-static const struct nvkm_specsrc
-g84_tex_sources[] = {
- { 0x408808, (const struct nvkm_specmux[]) {
- { 0xfffff, 0, "unk0" },
- {}
- }, "pgraph_tpc0_tex_unk08" },
- {}
-};
-
-static const struct nvkm_specdom
-g84_pm[] = {
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0xf0, (const struct nvkm_specsig[]) {
- { 0xbd, "pc01_gr_idle" },
- { 0x5e, "pc01_strmout_00" },
- { 0x5f, "pc01_strmout_01" },
- { 0xd2, "pc01_trast_00" },
- { 0xd3, "pc01_trast_01" },
- { 0xd4, "pc01_trast_02" },
- { 0xd5, "pc01_trast_03" },
- { 0xd8, "pc01_trast_04" },
- { 0xd9, "pc01_trast_05" },
- { 0x5c, "pc01_vattr_00" },
- { 0x5d, "pc01_vattr_01" },
- { 0x66, "pc01_vfetch_00", g84_vfetch_sources },
- { 0x67, "pc01_vfetch_01", g84_vfetch_sources },
- { 0x68, "pc01_vfetch_02", g84_vfetch_sources },
- { 0x69, "pc01_vfetch_03", g84_vfetch_sources },
- { 0x6a, "pc01_vfetch_04", g84_vfetch_sources },
- { 0x6b, "pc01_vfetch_05", g84_vfetch_sources },
- { 0x6c, "pc01_vfetch_06", g84_vfetch_sources },
- { 0x6d, "pc01_vfetch_07", g84_vfetch_sources },
- { 0x6e, "pc01_vfetch_08", g84_vfetch_sources },
- { 0x6f, "pc01_vfetch_09", g84_vfetch_sources },
- { 0x70, "pc01_vfetch_0a", g84_vfetch_sources },
- { 0x71, "pc01_vfetch_0b", g84_vfetch_sources },
- { 0x72, "pc01_vfetch_0c", g84_vfetch_sources },
- { 0x73, "pc01_vfetch_0d", g84_vfetch_sources },
- { 0x74, "pc01_vfetch_0e", g84_vfetch_sources },
- { 0x75, "pc01_vfetch_0f", g84_vfetch_sources },
- { 0x76, "pc01_vfetch_10", g84_vfetch_sources },
- { 0x77, "pc01_vfetch_11", g84_vfetch_sources },
- { 0x78, "pc01_vfetch_12", g84_vfetch_sources },
- { 0x79, "pc01_vfetch_13", g84_vfetch_sources },
- { 0x7a, "pc01_vfetch_14", g84_vfetch_sources },
- { 0x7b, "pc01_vfetch_15", g84_vfetch_sources },
- { 0x7c, "pc01_vfetch_16", g84_vfetch_sources },
- { 0x7d, "pc01_vfetch_17", g84_vfetch_sources },
- { 0x7e, "pc01_vfetch_18", g84_vfetch_sources },
- { 0x7f, "pc01_vfetch_19", g84_vfetch_sources },
- { 0x07, "pc01_zcull_00", nv50_zcull_sources },
- { 0x08, "pc01_zcull_01", nv50_zcull_sources },
- { 0x09, "pc01_zcull_02", nv50_zcull_sources },
- { 0x0a, "pc01_zcull_03", nv50_zcull_sources },
- { 0x0b, "pc01_zcull_04", nv50_zcull_sources },
- { 0x0c, "pc01_zcull_05", nv50_zcull_sources },
- { 0xa4, "pc01_unk00" },
- { 0xec, "pc01_trailer" },
- {}
- }, &nv40_perfctr_func },
- { 0xa0, (const struct nvkm_specsig[]) {
- { 0x30, "pc02_crop_00", g84_crop_sources },
- { 0x31, "pc02_crop_01", g84_crop_sources },
- { 0x32, "pc02_crop_02", g84_crop_sources },
- { 0x33, "pc02_crop_03", g84_crop_sources },
- { 0x00, "pc02_prop_00", g84_prop_sources },
- { 0x01, "pc02_prop_01", g84_prop_sources },
- { 0x02, "pc02_prop_02", g84_prop_sources },
- { 0x03, "pc02_prop_03", g84_prop_sources },
- { 0x04, "pc02_prop_04", g84_prop_sources },
- { 0x05, "pc02_prop_05", g84_prop_sources },
- { 0x06, "pc02_prop_06", g84_prop_sources },
- { 0x07, "pc02_prop_07", g84_prop_sources },
- { 0x48, "pc02_tex_00", g84_tex_sources },
- { 0x49, "pc02_tex_01", g84_tex_sources },
- { 0x4a, "pc02_tex_02", g84_tex_sources },
- { 0x4b, "pc02_tex_03", g84_tex_sources },
- { 0x1a, "pc02_tex_04", g84_tex_sources },
- { 0x1b, "pc02_tex_05", g84_tex_sources },
- { 0x1c, "pc02_tex_06", g84_tex_sources },
- { 0x44, "pc02_zrop_00", nv50_zrop_sources },
- { 0x45, "pc02_zrop_01", nv50_zrop_sources },
- { 0x46, "pc02_zrop_02", nv50_zrop_sources },
- { 0x47, "pc02_zrop_03", nv50_zrop_sources },
- { 0x8c, "pc02_trailer" },
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- {}
-};
-
-int
-g84_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return nv40_pm_new_(g84_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
deleted file mode 100644
index 8e02701def8e..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "gf100.h"
-
-const struct nvkm_specsrc
-gf100_pbfb_sources[] = {
- { 0x10f100, (const struct nvkm_specmux[]) {
- { 0x1, 0, "unk0" },
- { 0x3f, 4, "unk4" },
- {}
- }, "pbfb_broadcast_pm_unk100" },
- {}
-};
-
-const struct nvkm_specsrc
-gf100_pmfb_sources[] = {
- { 0x140028, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- { 0x7, 16, "unk16" },
- { 0x3, 24, "unk24" },
- { 0x2, 29, "unk29" },
- {}
- }, "pmfb0_pm_unk28" },
- {}
-};
-
-static const struct nvkm_specsrc
-gf100_l1_sources[] = {
- { 0x5044a8, (const struct nvkm_specmux[]) {
- { 0x3f, 0, "sel", true },
- {}
- }, "pgraph_gpc0_tpc0_l1_pm_mux" },
- {}
-};
-
-static const struct nvkm_specsrc
-gf100_tex_sources[] = {
- { 0x5042c0, (const struct nvkm_specmux[]) {
- { 0xf, 0, "sel0", true },
- { 0x7, 8, "sel1", true },
- {}
- }, "pgraph_gpc0_tpc0_tex_pm_mux_c_d" },
- {}
-};
-
-static const struct nvkm_specsrc
-gf100_unk400_sources[] = {
- { 0x50440c, (const struct nvkm_specmux[]) {
- { 0x3f, 0, "sel", true },
- {}
- }, "pgraph_gpc0_tpc0_unk400_pm_mux" },
- {}
-};
-
-static const struct nvkm_specdom
-gf100_pm_hub[] = {
- {}
-};
-
-const struct nvkm_specdom
-gf100_pm_gpc[] = {
- { 0xe0, (const struct nvkm_specsig[]) {
- { 0x00, "gpc00_l1_00", gf100_l1_sources },
- { 0x01, "gpc00_l1_01", gf100_l1_sources },
- { 0x02, "gpc00_l1_02", gf100_l1_sources },
- { 0x03, "gpc00_l1_03", gf100_l1_sources },
- { 0x05, "gpc00_l1_04", gf100_l1_sources },
- { 0x06, "gpc00_l1_05", gf100_l1_sources },
- { 0x0a, "gpc00_tex_00", gf100_tex_sources },
- { 0x0b, "gpc00_tex_01", gf100_tex_sources },
- { 0x0c, "gpc00_tex_02", gf100_tex_sources },
- { 0x0d, "gpc00_tex_03", gf100_tex_sources },
- { 0x0e, "gpc00_tex_04", gf100_tex_sources },
- { 0x0f, "gpc00_tex_05", gf100_tex_sources },
- { 0x10, "gpc00_tex_06", gf100_tex_sources },
- { 0x11, "gpc00_tex_07", gf100_tex_sources },
- { 0x12, "gpc00_tex_08", gf100_tex_sources },
- { 0x26, "gpc00_unk400_00", gf100_unk400_sources },
- {}
- }, &gf100_perfctr_func },
- {}
-};
-
-static const struct nvkm_specdom
-gf100_pm_part[] = {
- { 0xe0, (const struct nvkm_specsig[]) {
- { 0x0f, "part00_pbfb_00", gf100_pbfb_sources },
- { 0x10, "part00_pbfb_01", gf100_pbfb_sources },
- { 0x21, "part00_pmfb_00", gf100_pmfb_sources },
- { 0x04, "part00_pmfb_01", gf100_pmfb_sources },
- { 0x00, "part00_pmfb_02", gf100_pmfb_sources },
- { 0x02, "part00_pmfb_03", gf100_pmfb_sources },
- { 0x01, "part00_pmfb_04", gf100_pmfb_sources },
- { 0x2e, "part00_pmfb_05", gf100_pmfb_sources },
- { 0x2f, "part00_pmfb_06", gf100_pmfb_sources },
- { 0x1b, "part00_pmfb_07", gf100_pmfb_sources },
- { 0x1c, "part00_pmfb_08", gf100_pmfb_sources },
- { 0x1d, "part00_pmfb_09", gf100_pmfb_sources },
- { 0x1e, "part00_pmfb_0a", gf100_pmfb_sources },
- { 0x1f, "part00_pmfb_0b", gf100_pmfb_sources },
- {}
- }, &gf100_perfctr_func },
- {}
-};
-
-static void
-gf100_perfctr_init(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
- struct nvkm_perfctr *ctr)
-{
- struct nvkm_device *device = pm->engine.subdev.device;
- u32 log = ctr->logic_op;
- u32 src = 0x00000000;
- int i;
-
- for (i = 0; i < 4; i++)
- src |= ctr->signal[i] << (i * 8);
-
- nvkm_wr32(device, dom->addr + 0x09c, 0x00040002 | (dom->mode << 3));
- nvkm_wr32(device, dom->addr + 0x100, 0x00000000);
- nvkm_wr32(device, dom->addr + 0x040 + (ctr->slot * 0x08), src);
- nvkm_wr32(device, dom->addr + 0x044 + (ctr->slot * 0x08), log);
-}
-
-static void
-gf100_perfctr_read(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
- struct nvkm_perfctr *ctr)
-{
- struct nvkm_device *device = pm->engine.subdev.device;
-
- switch (ctr->slot) {
- case 0: ctr->ctr = nvkm_rd32(device, dom->addr + 0x08c); break;
- case 1: ctr->ctr = nvkm_rd32(device, dom->addr + 0x088); break;
- case 2: ctr->ctr = nvkm_rd32(device, dom->addr + 0x080); break;
- case 3: ctr->ctr = nvkm_rd32(device, dom->addr + 0x090); break;
- }
- dom->clk = nvkm_rd32(device, dom->addr + 0x070);
-}
-
-static void
-gf100_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
-{
- struct nvkm_device *device = pm->engine.subdev.device;
- nvkm_wr32(device, dom->addr + 0x06c, dom->signal_nr - 0x40 + 0x27);
- nvkm_wr32(device, dom->addr + 0x0ec, 0x00000011);
-}
-
-const struct nvkm_funcdom
-gf100_perfctr_func = {
- .init = gf100_perfctr_init,
- .read = gf100_perfctr_read,
- .next = gf100_perfctr_next,
-};
-
-static void
-gf100_pm_fini(struct nvkm_pm *pm)
-{
- struct nvkm_device *device = pm->engine.subdev.device;
- nvkm_mask(device, 0x000200, 0x10000000, 0x00000000);
- nvkm_mask(device, 0x000200, 0x10000000, 0x10000000);
-}
-
-static const struct nvkm_pm_func
-gf100_pm_ = {
- .fini = gf100_pm_fini,
-};
-
-int
-gf100_pm_new_(const struct gf100_pm_func *func, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- struct nvkm_pm *pm;
- u32 mask;
- int ret;
-
- if (!(pm = *ppm = kzalloc(sizeof(*pm), GFP_KERNEL)))
- return -ENOMEM;
-
- ret = nvkm_pm_ctor(&gf100_pm_, device, type, inst, pm);
- if (ret)
- return ret;
-
- /* HUB */
- ret = nvkm_perfdom_new(pm, "hub", 0, 0x1b0000, 0, 0x200,
- func->doms_hub);
- if (ret)
- return ret;
-
- /* GPC */
- mask = (1 << nvkm_rd32(device, 0x022430)) - 1;
- mask &= ~nvkm_rd32(device, 0x022504);
- mask &= ~nvkm_rd32(device, 0x022584);
-
- ret = nvkm_perfdom_new(pm, "gpc", mask, 0x180000,
- 0x1000, 0x200, func->doms_gpc);
- if (ret)
- return ret;
-
- /* PART */
- mask = (1 << nvkm_rd32(device, 0x022438)) - 1;
- mask &= ~nvkm_rd32(device, 0x022548);
- mask &= ~nvkm_rd32(device, 0x0225c8);
-
- ret = nvkm_perfdom_new(pm, "part", mask, 0x1a0000,
- 0x1000, 0x200, func->doms_part);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static const struct gf100_pm_func
-gf100_pm = {
- .doms_gpc = gf100_pm_gpc,
- .doms_hub = gf100_pm_hub,
- .doms_part = gf100_pm_part,
-};
-
-int
-gf100_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return gf100_pm_new_(&gf100_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
deleted file mode 100644
index bc4b014c4e8e..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_PM_NVC0_H__
-#define __NVKM_PM_NVC0_H__
-#include "priv.h"
-
-struct gf100_pm_func {
- const struct nvkm_specdom *doms_hub;
- const struct nvkm_specdom *doms_gpc;
- const struct nvkm_specdom *doms_part;
-};
-
-int gf100_pm_new_(const struct gf100_pm_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
- struct nvkm_pm **);
-
-extern const struct nvkm_funcdom gf100_perfctr_func;
-extern const struct nvkm_specdom gf100_pm_gpc[];
-
-extern const struct nvkm_specsrc gf100_pbfb_sources[];
-extern const struct nvkm_specsrc gf100_pmfb_sources[];
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c
deleted file mode 100644
index 505565866b59..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright 2015 Samuel Pitoiset
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Samuel Pitoiset
- */
-#include "gf100.h"
-
-static const struct nvkm_specdom
-gf108_pm_hub[] = {
- {}
-};
-
-static const struct nvkm_specdom
-gf108_pm_part[] = {
- { 0xe0, (const struct nvkm_specsig[]) {
- { 0x14, "part00_pbfb_00", gf100_pbfb_sources },
- { 0x15, "part00_pbfb_01", gf100_pbfb_sources },
- { 0x20, "part00_pbfb_02", gf100_pbfb_sources },
- { 0x21, "part00_pbfb_03", gf100_pbfb_sources },
- { 0x01, "part00_pmfb_00", gf100_pmfb_sources },
- { 0x04, "part00_pmfb_01", gf100_pmfb_sources },
- { 0x05, "part00_pmfb_02", gf100_pmfb_sources},
- { 0x07, "part00_pmfb_03", gf100_pmfb_sources },
- { 0x0d, "part00_pmfb_04", gf100_pmfb_sources },
- { 0x12, "part00_pmfb_05", gf100_pmfb_sources },
- { 0x13, "part00_pmfb_06", gf100_pmfb_sources },
- { 0x2c, "part00_pmfb_07", gf100_pmfb_sources },
- { 0x2d, "part00_pmfb_08", gf100_pmfb_sources },
- { 0x2e, "part00_pmfb_09", gf100_pmfb_sources },
- { 0x2f, "part00_pmfb_0a", gf100_pmfb_sources },
- { 0x30, "part00_pmfb_0b", gf100_pmfb_sources },
- {}
- }, &gf100_perfctr_func },
- {}
-};
-
-static const struct gf100_pm_func
-gf108_pm = {
- .doms_gpc = gf100_pm_gpc,
- .doms_hub = gf108_pm_hub,
- .doms_part = gf108_pm_part,
-};
-
-int
-gf108_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return gf100_pm_new_(&gf108_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c
deleted file mode 100644
index c61e8c010bb3..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright 2015 Samuel Pitoiset
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Samuel Pitoiset
- */
-#include "gf100.h"
-
-static const struct nvkm_specsrc
-gf117_pmfb_sources[] = {
- { 0x140028, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- { 0x7, 16, "unk16" },
- { 0x3, 24, "unk24" },
- { 0x2, 28, "unk28" },
- {}
- }, "pmfb0_pm_unk28" },
- { 0x14125c, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- {}
- }, "pmfb0_subp0_pm_unk25c" },
- {}
-};
-
-static const struct nvkm_specdom
-gf117_pm_hub[] = {
- {}
-};
-
-static const struct nvkm_specdom
-gf117_pm_part[] = {
- { 0xe0, (const struct nvkm_specsig[]) {
- { 0x00, "part00_pbfb_00", gf100_pbfb_sources },
- { 0x01, "part00_pbfb_01", gf100_pbfb_sources },
- { 0x12, "part00_pmfb_00", gf117_pmfb_sources },
- { 0x15, "part00_pmfb_01", gf117_pmfb_sources },
- { 0x16, "part00_pmfb_02", gf117_pmfb_sources },
- { 0x18, "part00_pmfb_03", gf117_pmfb_sources },
- { 0x1e, "part00_pmfb_04", gf117_pmfb_sources },
- { 0x23, "part00_pmfb_05", gf117_pmfb_sources },
- { 0x24, "part00_pmfb_06", gf117_pmfb_sources },
- { 0x0c, "part00_pmfb_07", gf117_pmfb_sources },
- { 0x0d, "part00_pmfb_08", gf117_pmfb_sources },
- { 0x0e, "part00_pmfb_09", gf117_pmfb_sources },
- { 0x0f, "part00_pmfb_0a", gf117_pmfb_sources },
- { 0x10, "part00_pmfb_0b", gf117_pmfb_sources },
- {}
- }, &gf100_perfctr_func },
- {}
-};
-
-static const struct gf100_pm_func
-gf117_pm = {
- .doms_gpc = gf100_pm_gpc,
- .doms_hub = gf117_pm_hub,
- .doms_part = gf117_pm_part,
-};
-
-int
-gf117_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return gf100_pm_new_(&gf117_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
deleted file mode 100644
index 75bf3df1cb18..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "gf100.h"
-
-static const struct nvkm_specsrc
-gk104_pmfb_sources[] = {
- { 0x140028, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- { 0x7, 16, "unk16" },
- { 0x3, 24, "unk24" },
- { 0x2, 28, "unk28" },
- {}
- }, "pmfb0_pm_unk28" },
- { 0x14125c, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- {}
- }, "pmfb0_subp0_pm_unk25c" },
- { 0x14165c, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- {}
- }, "pmfb0_subp1_pm_unk25c" },
- { 0x141a5c, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- {}
- }, "pmfb0_subp2_pm_unk25c" },
- { 0x141e5c, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- {}
- }, "pmfb0_subp3_pm_unk25c" },
- {}
-};
-
-static const struct nvkm_specsrc
-gk104_tex_sources[] = {
- { 0x5042c0, (const struct nvkm_specmux[]) {
- { 0xf, 0, "sel0", true },
- { 0x7, 8, "sel1", true },
- {}
- }, "pgraph_gpc0_tpc0_tex_pm_mux_c_d" },
- { 0x5042c8, (const struct nvkm_specmux[]) {
- { 0x1f, 0, "sel", true },
- {}
- }, "pgraph_gpc0_tpc0_tex_pm_unkc8" },
- { 0x5042b8, (const struct nvkm_specmux[]) {
- { 0xff, 0, "sel", true },
- {}
- }, "pgraph_gpc0_tpc0_tex_pm_unkb8" },
- {}
-};
-
-static const struct nvkm_specdom
-gk104_pm_hub[] = {
- { 0x60, (const struct nvkm_specsig[]) {
- { 0x47, "hub00_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0x40, (const struct nvkm_specsig[]) {
- { 0x27, "hub01_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0x60, (const struct nvkm_specsig[]) {
- { 0x47, "hub02_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0x60, (const struct nvkm_specsig[]) {
- { 0x47, "hub03_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0x40, (const struct nvkm_specsig[]) {
- { 0x03, "host_mmio_rd" },
- { 0x27, "hub04_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0x60, (const struct nvkm_specsig[]) {
- { 0x47, "hub05_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0xc0, (const struct nvkm_specsig[]) {
- { 0x74, "host_fb_rd3x" },
- { 0x75, "host_fb_rd3x_2" },
- { 0xa7, "hub06_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0x60, (const struct nvkm_specsig[]) {
- { 0x47, "hub07_user_0" },
- {}
- }, &gf100_perfctr_func },
- {}
-};
-
-static const struct nvkm_specdom
-gk104_pm_gpc[] = {
- { 0xe0, (const struct nvkm_specsig[]) {
- { 0xc7, "gpc00_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &gf100_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- { 0x00, "gpc02_tex_00", gk104_tex_sources },
- { 0x01, "gpc02_tex_01", gk104_tex_sources },
- { 0x02, "gpc02_tex_02", gk104_tex_sources },
- { 0x03, "gpc02_tex_03", gk104_tex_sources },
- { 0x04, "gpc02_tex_04", gk104_tex_sources },
- { 0x05, "gpc02_tex_05", gk104_tex_sources },
- { 0x06, "gpc02_tex_06", gk104_tex_sources },
- { 0x07, "gpc02_tex_07", gk104_tex_sources },
- { 0x08, "gpc02_tex_08", gk104_tex_sources },
- { 0x0a, "gpc02_tex_0a", gk104_tex_sources },
- { 0x0b, "gpc02_tex_0b", gk104_tex_sources },
- { 0x0d, "gpc02_tex_0c", gk104_tex_sources },
- { 0x0c, "gpc02_tex_0d", gk104_tex_sources },
- { 0x0e, "gpc02_tex_0e", gk104_tex_sources },
- { 0x0f, "gpc02_tex_0f", gk104_tex_sources },
- { 0x10, "gpc02_tex_10", gk104_tex_sources },
- { 0x11, "gpc02_tex_11", gk104_tex_sources },
- { 0x12, "gpc02_tex_12", gk104_tex_sources },
- {}
- }, &gf100_perfctr_func },
- {}
-};
-
-static const struct nvkm_specdom
-gk104_pm_part[] = {
- { 0x60, (const struct nvkm_specsig[]) {
- { 0x00, "part00_pbfb_00", gf100_pbfb_sources },
- { 0x01, "part00_pbfb_01", gf100_pbfb_sources },
- { 0x0c, "part00_pmfb_00", gk104_pmfb_sources },
- { 0x0d, "part00_pmfb_01", gk104_pmfb_sources },
- { 0x0e, "part00_pmfb_02", gk104_pmfb_sources },
- { 0x0f, "part00_pmfb_03", gk104_pmfb_sources },
- { 0x10, "part00_pmfb_04", gk104_pmfb_sources },
- { 0x12, "part00_pmfb_05", gk104_pmfb_sources },
- { 0x15, "part00_pmfb_06", gk104_pmfb_sources },
- { 0x16, "part00_pmfb_07", gk104_pmfb_sources },
- { 0x18, "part00_pmfb_08", gk104_pmfb_sources },
- { 0x21, "part00_pmfb_09", gk104_pmfb_sources },
- { 0x25, "part00_pmfb_0a", gk104_pmfb_sources },
- { 0x26, "part00_pmfb_0b", gk104_pmfb_sources },
- { 0x27, "part00_pmfb_0c", gk104_pmfb_sources },
- { 0x47, "part00_user_0" },
- {}
- }, &gf100_perfctr_func },
- { 0x60, (const struct nvkm_specsig[]) {
- { 0x47, "part01_user_0" },
- {}
- }, &gf100_perfctr_func },
- {}
-};
-
-static const struct gf100_pm_func
-gk104_pm = {
- .doms_gpc = gk104_pm_gpc,
- .doms_hub = gk104_pm_hub,
- .doms_part = gk104_pm_part,
-};
-
-int
-gk104_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return gf100_pm_new_(&gk104_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c
deleted file mode 100644
index 25874c541486..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Copyright 2015 Nouveau project
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Samuel Pitoiset
- */
-#include "nv40.h"
-
-const struct nvkm_specsrc
-gt200_crop_sources[] = {
- { 0x407008, (const struct nvkm_specmux[]) {
- { 0xf, 0, "sel0", true },
- { 0x1f, 16, "sel1", true },
- {}
- }, "pgraph_rop0_crop_pm_mux" },
- {}
-};
-
-const struct nvkm_specsrc
-gt200_prop_sources[] = {
- { 0x408750, (const struct nvkm_specmux[]) {
- { 0x3f, 0, "sel", true },
- {}
- }, "pgraph_tpc0_prop_pm_mux" },
- {}
-};
-
-const struct nvkm_specsrc
-gt200_tex_sources[] = {
- { 0x408508, (const struct nvkm_specmux[]) {
- { 0xfffff, 0, "unk0" },
- {}
- }, "pgraph_tpc0_tex_unk08" },
- {}
-};
-
-static const struct nvkm_specdom
-gt200_pm[] = {
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0xf0, (const struct nvkm_specsig[]) {
- { 0xc9, "pc01_gr_idle" },
- { 0x84, "pc01_strmout_00" },
- { 0x85, "pc01_strmout_01" },
- { 0xde, "pc01_trast_00" },
- { 0xdf, "pc01_trast_01" },
- { 0xe0, "pc01_trast_02" },
- { 0xe1, "pc01_trast_03" },
- { 0xe4, "pc01_trast_04" },
- { 0xe5, "pc01_trast_05" },
- { 0x82, "pc01_vattr_00" },
- { 0x83, "pc01_vattr_01" },
- { 0x46, "pc01_vfetch_00", g84_vfetch_sources },
- { 0x47, "pc01_vfetch_01", g84_vfetch_sources },
- { 0x48, "pc01_vfetch_02", g84_vfetch_sources },
- { 0x49, "pc01_vfetch_03", g84_vfetch_sources },
- { 0x4a, "pc01_vfetch_04", g84_vfetch_sources },
- { 0x4b, "pc01_vfetch_05", g84_vfetch_sources },
- { 0x4c, "pc01_vfetch_06", g84_vfetch_sources },
- { 0x4d, "pc01_vfetch_07", g84_vfetch_sources },
- { 0x4e, "pc01_vfetch_08", g84_vfetch_sources },
- { 0x4f, "pc01_vfetch_09", g84_vfetch_sources },
- { 0x50, "pc01_vfetch_0a", g84_vfetch_sources },
- { 0x51, "pc01_vfetch_0b", g84_vfetch_sources },
- { 0x52, "pc01_vfetch_0c", g84_vfetch_sources },
- { 0x53, "pc01_vfetch_0d", g84_vfetch_sources },
- { 0x54, "pc01_vfetch_0e", g84_vfetch_sources },
- { 0x55, "pc01_vfetch_0f", g84_vfetch_sources },
- { 0x56, "pc01_vfetch_10", g84_vfetch_sources },
- { 0x57, "pc01_vfetch_11", g84_vfetch_sources },
- { 0x58, "pc01_vfetch_12", g84_vfetch_sources },
- { 0x59, "pc01_vfetch_13", g84_vfetch_sources },
- { 0x5a, "pc01_vfetch_14", g84_vfetch_sources },
- { 0x5b, "pc01_vfetch_15", g84_vfetch_sources },
- { 0x5c, "pc01_vfetch_16", g84_vfetch_sources },
- { 0x5d, "pc01_vfetch_17", g84_vfetch_sources },
- { 0x5e, "pc01_vfetch_18", g84_vfetch_sources },
- { 0x5f, "pc01_vfetch_19", g84_vfetch_sources },
- { 0x07, "pc01_zcull_00", nv50_zcull_sources },
- { 0x08, "pc01_zcull_01", nv50_zcull_sources },
- { 0x09, "pc01_zcull_02", nv50_zcull_sources },
- { 0x0a, "pc01_zcull_03", nv50_zcull_sources },
- { 0x0b, "pc01_zcull_04", nv50_zcull_sources },
- { 0x0c, "pc01_zcull_05", nv50_zcull_sources },
-
- { 0xb0, "pc01_unk00" },
- { 0xec, "pc01_trailer" },
- {}
- }, &nv40_perfctr_func },
- { 0xf0, (const struct nvkm_specsig[]) {
- { 0x55, "pc02_crop_00", gt200_crop_sources },
- { 0x56, "pc02_crop_01", gt200_crop_sources },
- { 0x57, "pc02_crop_02", gt200_crop_sources },
- { 0x58, "pc02_crop_03", gt200_crop_sources },
- { 0x00, "pc02_prop_00", gt200_prop_sources },
- { 0x01, "pc02_prop_01", gt200_prop_sources },
- { 0x02, "pc02_prop_02", gt200_prop_sources },
- { 0x03, "pc02_prop_03", gt200_prop_sources },
- { 0x04, "pc02_prop_04", gt200_prop_sources },
- { 0x05, "pc02_prop_05", gt200_prop_sources },
- { 0x06, "pc02_prop_06", gt200_prop_sources },
- { 0x07, "pc02_prop_07", gt200_prop_sources },
- { 0x78, "pc02_tex_00", gt200_tex_sources },
- { 0x79, "pc02_tex_01", gt200_tex_sources },
- { 0x7a, "pc02_tex_02", gt200_tex_sources },
- { 0x7b, "pc02_tex_03", gt200_tex_sources },
- { 0x32, "pc02_tex_04", gt200_tex_sources },
- { 0x33, "pc02_tex_05", gt200_tex_sources },
- { 0x34, "pc02_tex_06", gt200_tex_sources },
- { 0x74, "pc02_zrop_00", nv50_zrop_sources },
- { 0x75, "pc02_zrop_01", nv50_zrop_sources },
- { 0x76, "pc02_zrop_02", nv50_zrop_sources },
- { 0x77, "pc02_zrop_03", nv50_zrop_sources },
- { 0xec, "pc02_trailer" },
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- {}
-};
-
-int
-gt200_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return nv40_pm_new_(gt200_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
deleted file mode 100644
index 54c23e2b6645..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv40.h"
-
-static const struct nvkm_specsrc
-gt215_zcull_sources[] = {
- { 0x402ca4, (const struct nvkm_specmux[]) {
- { 0x7fff, 0, "unk0" },
- { 0xff, 24, "unk24" },
- {}
- }, "pgraph_zcull_pm_unka4" },
- {}
-};
-
-static const struct nvkm_specdom
-gt215_pm[] = {
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0xf0, (const struct nvkm_specsig[]) {
- { 0xcb, "pc01_gr_idle" },
- { 0x86, "pc01_strmout_00" },
- { 0x87, "pc01_strmout_01" },
- { 0xe0, "pc01_trast_00" },
- { 0xe1, "pc01_trast_01" },
- { 0xe2, "pc01_trast_02" },
- { 0xe3, "pc01_trast_03" },
- { 0xe6, "pc01_trast_04" },
- { 0xe7, "pc01_trast_05" },
- { 0x84, "pc01_vattr_00" },
- { 0x85, "pc01_vattr_01" },
- { 0x46, "pc01_vfetch_00", g84_vfetch_sources },
- { 0x47, "pc01_vfetch_01", g84_vfetch_sources },
- { 0x48, "pc01_vfetch_02", g84_vfetch_sources },
- { 0x49, "pc01_vfetch_03", g84_vfetch_sources },
- { 0x4a, "pc01_vfetch_04", g84_vfetch_sources },
- { 0x4b, "pc01_vfetch_05", g84_vfetch_sources },
- { 0x4c, "pc01_vfetch_06", g84_vfetch_sources },
- { 0x4d, "pc01_vfetch_07", g84_vfetch_sources },
- { 0x4e, "pc01_vfetch_08", g84_vfetch_sources },
- { 0x4f, "pc01_vfetch_09", g84_vfetch_sources },
- { 0x50, "pc01_vfetch_0a", g84_vfetch_sources },
- { 0x51, "pc01_vfetch_0b", g84_vfetch_sources },
- { 0x52, "pc01_vfetch_0c", g84_vfetch_sources },
- { 0x53, "pc01_vfetch_0d", g84_vfetch_sources },
- { 0x54, "pc01_vfetch_0e", g84_vfetch_sources },
- { 0x55, "pc01_vfetch_0f", g84_vfetch_sources },
- { 0x56, "pc01_vfetch_10", g84_vfetch_sources },
- { 0x57, "pc01_vfetch_11", g84_vfetch_sources },
- { 0x58, "pc01_vfetch_12", g84_vfetch_sources },
- { 0x59, "pc01_vfetch_13", g84_vfetch_sources },
- { 0x5a, "pc01_vfetch_14", g84_vfetch_sources },
- { 0x5b, "pc01_vfetch_15", g84_vfetch_sources },
- { 0x5c, "pc01_vfetch_16", g84_vfetch_sources },
- { 0x5d, "pc01_vfetch_17", g84_vfetch_sources },
- { 0x5e, "pc01_vfetch_18", g84_vfetch_sources },
- { 0x5f, "pc01_vfetch_19", g84_vfetch_sources },
- { 0x07, "pc01_zcull_00", gt215_zcull_sources },
- { 0x08, "pc01_zcull_01", gt215_zcull_sources },
- { 0x09, "pc01_zcull_02", gt215_zcull_sources },
- { 0x0a, "pc01_zcull_03", gt215_zcull_sources },
- { 0x0b, "pc01_zcull_04", gt215_zcull_sources },
- { 0x0c, "pc01_zcull_05", gt215_zcull_sources },
- { 0xb2, "pc01_unk00" },
- { 0xec, "pc01_trailer" },
- {}
- }, &nv40_perfctr_func },
- { 0xe0, (const struct nvkm_specsig[]) {
- { 0x64, "pc02_crop_00", gt200_crop_sources },
- { 0x65, "pc02_crop_01", gt200_crop_sources },
- { 0x66, "pc02_crop_02", gt200_crop_sources },
- { 0x67, "pc02_crop_03", gt200_crop_sources },
- { 0x00, "pc02_prop_00", gt200_prop_sources },
- { 0x01, "pc02_prop_01", gt200_prop_sources },
- { 0x02, "pc02_prop_02", gt200_prop_sources },
- { 0x03, "pc02_prop_03", gt200_prop_sources },
- { 0x04, "pc02_prop_04", gt200_prop_sources },
- { 0x05, "pc02_prop_05", gt200_prop_sources },
- { 0x06, "pc02_prop_06", gt200_prop_sources },
- { 0x07, "pc02_prop_07", gt200_prop_sources },
- { 0x80, "pc02_tex_00", gt200_tex_sources },
- { 0x81, "pc02_tex_01", gt200_tex_sources },
- { 0x82, "pc02_tex_02", gt200_tex_sources },
- { 0x83, "pc02_tex_03", gt200_tex_sources },
- { 0x3a, "pc02_tex_04", gt200_tex_sources },
- { 0x3b, "pc02_tex_05", gt200_tex_sources },
- { 0x3c, "pc02_tex_06", gt200_tex_sources },
- { 0x7c, "pc02_zrop_00", nv50_zrop_sources },
- { 0x7d, "pc02_zrop_01", nv50_zrop_sources },
- { 0x7e, "pc02_zrop_02", nv50_zrop_sources },
- { 0x7f, "pc02_zrop_03", nv50_zrop_sources },
- { 0xcc, "pc02_trailer" },
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- {}
-};
-
-int
-gt215_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return nv40_pm_new_(gt215_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
deleted file mode 100644
index eba5b3b79340..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv40.h"
-
-static void
-nv40_perfctr_init(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
- struct nvkm_perfctr *ctr)
-{
- struct nvkm_device *device = pm->engine.subdev.device;
- u32 log = ctr->logic_op;
- u32 src = 0x00000000;
- int i;
-
- for (i = 0; i < 4; i++)
- src |= ctr->signal[i] << (i * 8);
-
- nvkm_wr32(device, 0x00a7c0 + dom->addr, 0x00000001 | (dom->mode << 4));
- nvkm_wr32(device, 0x00a400 + dom->addr + (ctr->slot * 0x40), src);
- nvkm_wr32(device, 0x00a420 + dom->addr + (ctr->slot * 0x40), log);
-}
-
-static void
-nv40_perfctr_read(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
- struct nvkm_perfctr *ctr)
-{
- struct nvkm_device *device = pm->engine.subdev.device;
-
- switch (ctr->slot) {
- case 0: ctr->ctr = nvkm_rd32(device, 0x00a700 + dom->addr); break;
- case 1: ctr->ctr = nvkm_rd32(device, 0x00a6c0 + dom->addr); break;
- case 2: ctr->ctr = nvkm_rd32(device, 0x00a680 + dom->addr); break;
- case 3: ctr->ctr = nvkm_rd32(device, 0x00a740 + dom->addr); break;
- }
- dom->clk = nvkm_rd32(device, 0x00a600 + dom->addr);
-}
-
-static void
-nv40_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
-{
- struct nvkm_device *device = pm->engine.subdev.device;
- struct nv40_pm *nv40pm = container_of(pm, struct nv40_pm, base);
-
- if (nv40pm->sequence != pm->sequence) {
- nvkm_wr32(device, 0x400084, 0x00000020);
- nv40pm->sequence = pm->sequence;
- }
-}
-
-const struct nvkm_funcdom
-nv40_perfctr_func = {
- .init = nv40_perfctr_init,
- .read = nv40_perfctr_read,
- .next = nv40_perfctr_next,
-};
-
-static const struct nvkm_pm_func
-nv40_pm_ = {
-};
-
-int
-nv40_pm_new_(const struct nvkm_specdom *doms, struct nvkm_device *device,
- enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- struct nv40_pm *pm;
- int ret;
-
- if (!(pm = kzalloc(sizeof(*pm), GFP_KERNEL)))
- return -ENOMEM;
- *ppm = &pm->base;
-
- ret = nvkm_pm_ctor(&nv40_pm_, device, type, inst, &pm->base);
- if (ret)
- return ret;
-
- return nvkm_perfdom_new(&pm->base, "pc", 0, 0, 0, 4, doms);
-}
-
-static const struct nvkm_specdom
-nv40_pm[] = {
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- {}
-};
-
-int
-nv40_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return nv40_pm_new_(nv40_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
deleted file mode 100644
index afb79843723d..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_PM_NV40_H__
-#define __NVKM_PM_NV40_H__
-#define nv40_pm(p) container_of((p), struct nv40_pm, base)
-#include "priv.h"
-
-struct nv40_pm {
- struct nvkm_pm base;
- u32 sequence;
-};
-
-int nv40_pm_new_(const struct nvkm_specdom *, struct nvkm_device *, enum nvkm_subdev_type, int,
- struct nvkm_pm **);
-extern const struct nvkm_funcdom nv40_perfctr_func;
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
deleted file mode 100644
index bbd3404901f9..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv40.h"
-
-const struct nvkm_specsrc
-nv50_zcull_sources[] = {
- { 0x402ca4, (const struct nvkm_specmux[]) {
- { 0x7fff, 0, "unk0" },
- {}
- }, "pgraph_zcull_pm_unka4" },
- {}
-};
-
-const struct nvkm_specsrc
-nv50_zrop_sources[] = {
- { 0x40708c, (const struct nvkm_specmux[]) {
- { 0xf, 0, "sel0", true },
- { 0xf, 16, "sel1", true },
- {}
- }, "pgraph_rop0_zrop_pm_mux" },
- {}
-};
-
-static const struct nvkm_specsrc
-nv50_prop_sources[] = {
- { 0x40be50, (const struct nvkm_specmux[]) {
- { 0x1f, 0, "sel", true },
- {}
- }, "pgraph_tpc3_prop_pm_mux" },
- {}
-};
-
-static const struct nvkm_specsrc
-nv50_crop_sources[] = {
- { 0x407008, (const struct nvkm_specmux[]) {
- { 0x7, 0, "sel0", true },
- { 0x7, 16, "sel1", true },
- {}
- }, "pgraph_rop0_crop_pm_mux" },
- {}
-};
-
-static const struct nvkm_specsrc
-nv50_tex_sources[] = {
- { 0x40b808, (const struct nvkm_specmux[]) {
- { 0x3fff, 0, "unk0" },
- {}
- }, "pgraph_tpc3_tex_unk08" },
- {}
-};
-
-static const struct nvkm_specsrc
-nv50_vfetch_sources[] = {
- { 0x400c0c, (const struct nvkm_specmux[]) {
- { 0x1, 0, "unk0" },
- {}
- }, "pgraph_vfetch_unk0c" },
- {}
-};
-
-static const struct nvkm_specdom
-nv50_pm[] = {
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0xf0, (const struct nvkm_specsig[]) {
- { 0xc8, "pc01_gr_idle" },
- { 0x7f, "pc01_strmout_00" },
- { 0x80, "pc01_strmout_01" },
- { 0xdc, "pc01_trast_00" },
- { 0xdd, "pc01_trast_01" },
- { 0xde, "pc01_trast_02" },
- { 0xdf, "pc01_trast_03" },
- { 0xe2, "pc01_trast_04" },
- { 0xe3, "pc01_trast_05" },
- { 0x7c, "pc01_vattr_00" },
- { 0x7d, "pc01_vattr_01" },
- { 0x26, "pc01_vfetch_00", nv50_vfetch_sources },
- { 0x27, "pc01_vfetch_01", nv50_vfetch_sources },
- { 0x28, "pc01_vfetch_02", nv50_vfetch_sources },
- { 0x29, "pc01_vfetch_03", nv50_vfetch_sources },
- { 0x2a, "pc01_vfetch_04", nv50_vfetch_sources },
- { 0x2b, "pc01_vfetch_05", nv50_vfetch_sources },
- { 0x2c, "pc01_vfetch_06", nv50_vfetch_sources },
- { 0x2d, "pc01_vfetch_07", nv50_vfetch_sources },
- { 0x2e, "pc01_vfetch_08", nv50_vfetch_sources },
- { 0x2f, "pc01_vfetch_09", nv50_vfetch_sources },
- { 0x30, "pc01_vfetch_0a", nv50_vfetch_sources },
- { 0x31, "pc01_vfetch_0b", nv50_vfetch_sources },
- { 0x32, "pc01_vfetch_0c", nv50_vfetch_sources },
- { 0x33, "pc01_vfetch_0d", nv50_vfetch_sources },
- { 0x34, "pc01_vfetch_0e", nv50_vfetch_sources },
- { 0x35, "pc01_vfetch_0f", nv50_vfetch_sources },
- { 0x36, "pc01_vfetch_10", nv50_vfetch_sources },
- { 0x37, "pc01_vfetch_11", nv50_vfetch_sources },
- { 0x38, "pc01_vfetch_12", nv50_vfetch_sources },
- { 0x39, "pc01_vfetch_13", nv50_vfetch_sources },
- { 0x3a, "pc01_vfetch_14", nv50_vfetch_sources },
- { 0x3b, "pc01_vfetch_15", nv50_vfetch_sources },
- { 0x3c, "pc01_vfetch_16", nv50_vfetch_sources },
- { 0x3d, "pc01_vfetch_17", nv50_vfetch_sources },
- { 0x3e, "pc01_vfetch_18", nv50_vfetch_sources },
- { 0x3f, "pc01_vfetch_19", nv50_vfetch_sources },
- { 0x20, "pc01_zcull_00", nv50_zcull_sources },
- { 0x21, "pc01_zcull_01", nv50_zcull_sources },
- { 0x22, "pc01_zcull_02", nv50_zcull_sources },
- { 0x23, "pc01_zcull_03", nv50_zcull_sources },
- { 0x24, "pc01_zcull_04", nv50_zcull_sources },
- { 0x25, "pc01_zcull_05", nv50_zcull_sources },
- { 0xae, "pc01_unk00" },
- { 0xee, "pc01_trailer" },
- {}
- }, &nv40_perfctr_func },
- { 0xf0, (const struct nvkm_specsig[]) {
- { 0x52, "pc02_crop_00", nv50_crop_sources },
- { 0x53, "pc02_crop_01", nv50_crop_sources },
- { 0x54, "pc02_crop_02", nv50_crop_sources },
- { 0x55, "pc02_crop_03", nv50_crop_sources },
- { 0x00, "pc02_prop_00", nv50_prop_sources },
- { 0x01, "pc02_prop_01", nv50_prop_sources },
- { 0x02, "pc02_prop_02", nv50_prop_sources },
- { 0x03, "pc02_prop_03", nv50_prop_sources },
- { 0x04, "pc02_prop_04", nv50_prop_sources },
- { 0x05, "pc02_prop_05", nv50_prop_sources },
- { 0x06, "pc02_prop_06", nv50_prop_sources },
- { 0x07, "pc02_prop_07", nv50_prop_sources },
- { 0x70, "pc02_tex_00", nv50_tex_sources },
- { 0x71, "pc02_tex_01", nv50_tex_sources },
- { 0x72, "pc02_tex_02", nv50_tex_sources },
- { 0x73, "pc02_tex_03", nv50_tex_sources },
- { 0x40, "pc02_tex_04", nv50_tex_sources },
- { 0x41, "pc02_tex_05", nv50_tex_sources },
- { 0x42, "pc02_tex_06", nv50_tex_sources },
- { 0x6c, "pc02_zrop_00", nv50_zrop_sources },
- { 0x6d, "pc02_zrop_01", nv50_zrop_sources },
- { 0x6e, "pc02_zrop_02", nv50_zrop_sources },
- { 0x6f, "pc02_zrop_03", nv50_zrop_sources },
- { 0xee, "pc02_trailer" },
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- { 0x20, (const struct nvkm_specsig[]) {
- {}
- }, &nv40_perfctr_func },
- {}
-};
-
-int
-nv50_pm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_pm **ppm)
-{
- return nv40_pm_new_(nv50_pm, device, type, inst, ppm);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
deleted file mode 100644
index c011227f7052..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#ifndef __NVKM_PM_PRIV_H__
-#define __NVKM_PM_PRIV_H__
-#define nvkm_pm(p) container_of((p), struct nvkm_pm, engine)
-#include <engine/pm.h>
-
-int nvkm_pm_ctor(const struct nvkm_pm_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
- struct nvkm_pm *);
-
-struct nvkm_pm_func {
- void (*fini)(struct nvkm_pm *);
-};
-
-struct nvkm_perfctr {
- struct list_head head;
- u8 domain;
- u8 signal[4];
- u64 source[4][8];
- int slot;
- u32 logic_op;
- u32 ctr;
-};
-
-struct nvkm_specmux {
- u32 mask;
- u8 shift;
- const char *name;
- bool enable;
-};
-
-struct nvkm_specsrc {
- u32 addr;
- const struct nvkm_specmux *mux;
- const char *name;
-};
-
-struct nvkm_perfsrc {
- struct list_head head;
- char *name;
- u32 addr;
- u32 mask;
- u8 shift;
- bool enable;
-};
-
-extern const struct nvkm_specsrc nv50_zcull_sources[];
-extern const struct nvkm_specsrc nv50_zrop_sources[];
-extern const struct nvkm_specsrc g84_vfetch_sources[];
-extern const struct nvkm_specsrc gt200_crop_sources[];
-extern const struct nvkm_specsrc gt200_prop_sources[];
-extern const struct nvkm_specsrc gt200_tex_sources[];
-
-struct nvkm_specsig {
- u8 signal;
- const char *name;
- const struct nvkm_specsrc *source;
-};
-
-struct nvkm_perfsig {
- const char *name;
- u8 source[8];
-};
-
-struct nvkm_specdom {
- u16 signal_nr;
- const struct nvkm_specsig *signal;
- const struct nvkm_funcdom *func;
-};
-
-#define nvkm_perfdom(p) container_of((p), struct nvkm_perfdom, object)
-#include <core/object.h>
-
-struct nvkm_perfdom {
- struct nvkm_object object;
- struct nvkm_perfmon *perfmon;
- struct list_head head;
- struct list_head list;
- const struct nvkm_funcdom *func;
- struct nvkm_perfctr *ctr[4];
- char name[32];
- u32 addr;
- u8 mode;
- u32 clk;
- u16 signal_nr;
- struct nvkm_perfsig signal[] __counted_by(signal_nr);
-};
-
-struct nvkm_funcdom {
- void (*init)(struct nvkm_pm *, struct nvkm_perfdom *,
- struct nvkm_perfctr *);
- void (*read)(struct nvkm_pm *, struct nvkm_perfdom *,
- struct nvkm_perfctr *);
- void (*next)(struct nvkm_pm *, struct nvkm_perfdom *);
-};
-
-int nvkm_perfdom_new(struct nvkm_pm *, const char *, u32, u32, u32, u32,
- const struct nvkm_specdom *);
-
-#define nvkm_perfmon(p) container_of((p), struct nvkm_perfmon, object)
-
-struct nvkm_perfmon {
- struct nvkm_object object;
- struct nvkm_pm *pm;
-};
-#endif
diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c
index 050ca7eafac5..5f8002f6bb7a 100644
--- a/drivers/gpu/drm/omapdrm/dss/base.c
+++ b/drivers/gpu/drm/omapdrm/dss/base.c
@@ -242,8 +242,7 @@ static void omapdss_walk_device(struct device *dev, struct device_node *node,
of_node_put(n);
- n = NULL;
- while ((n = of_graph_get_next_endpoint(node, n)) != NULL) {
+ for_each_endpoint_of_node(node, n) {
struct device_node *pn = of_graph_get_remote_port_parent(n);
if (!pn)
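
The hunk above replaces the open-coded of_graph_get_next_endpoint() loop with the for_each_endpoint_of_node() iterator: advancing the iterator drops the reference on the previous endpoint, so the walk no longer needs the manual NULL seed, while the body still owns any references it takes itself (here the remote port parent). A minimal sketch of the pattern, independent of the omapdss code (example_walk_endpoints is an illustrative name, not a kernel function):

#include <linux/of.h>
#include <linux/of_graph.h>

/*
 * Minimal sketch, not the omapdss walker: visit every endpoint of "node".
 * for_each_endpoint_of_node() releases the previous endpoint as it advances,
 * so no explicit of_node_put() on "ep" is needed unless the loop is left
 * early; the remote port parent taken in the body is still ours to put.
 */
static void example_walk_endpoints(struct device_node *node)
{
	struct device_node *ep;

	for_each_endpoint_of_node(node, ep) {
		struct device_node *remote = of_graph_get_remote_port_parent(ep);

		if (!remote)
			continue;
		/* ... inspect "remote" here ... */
		of_node_put(remote);
	}
}
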
diff --git a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
index e77db8597eb7..7e66db4a88bb 100644
--- a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
+++ b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
@@ -377,6 +377,8 @@ static int boe_bf060y8m_aj0_probe(struct mipi_dsi_device *dsi)
drm_panel_init(&boe->panel, dev, &boe_bf060y8m_aj0_panel_funcs,
DRM_MODE_CONNECTOR_DSI);
+ boe->panel.prepare_prev_first = true;
+
boe->panel.backlight = boe_bf060y8m_aj0_create_backlight(dsi);
if (IS_ERR(boe->panel.backlight))
return dev_err_probe(dev, PTR_ERR(boe->panel.backlight),
diff --git a/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c b/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
index 763e9f8342d3..0b87f1e6ecae 100644
--- a/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
+++ b/drivers/gpu/drm/panel/panel-boe-th101mb31ig002-28a.c
@@ -16,12 +16,31 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+struct boe_th101mb31ig002;
+
+struct panel_desc {
+ const struct drm_display_mode *modes;
+ unsigned long mode_flags;
+ enum mipi_dsi_pixel_format format;
+ int (*init)(struct boe_th101mb31ig002 *ctx);
+ unsigned int lanes;
+ bool lp11_before_reset;
+ unsigned int vcioo_to_lp11_delay_ms;
+ unsigned int lp11_to_reset_delay_ms;
+ unsigned int backlight_off_to_display_off_delay_ms;
+ unsigned int enter_sleep_to_reset_down_delay_ms;
+ unsigned int power_off_delay_ms;
+};
struct boe_th101mb31ig002 {
struct drm_panel panel;
struct mipi_dsi_device *dsi;
+ const struct panel_desc *desc;
+
struct regulator *power;
struct gpio_desc *enable;
struct gpio_desc *reset;
@@ -39,74 +58,123 @@ static void boe_th101mb31ig002_reset(struct boe_th101mb31ig002 *ctx)
usleep_range(5000, 6000);
}
-static int boe_th101mb31ig002_enable(struct drm_panel *panel)
+static int boe_th101mb31ig002_enable(struct boe_th101mb31ig002 *ctx)
{
- struct boe_th101mb31ig002 *ctx = container_of(panel,
- struct boe_th101mb31ig002,
- panel);
- struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
-
- mipi_dsi_dcs_write_seq(dsi, 0xE0, 0xAB, 0xBA);
- mipi_dsi_dcs_write_seq(dsi, 0xE1, 0xBA, 0xAB);
- mipi_dsi_dcs_write_seq(dsi, 0xB1, 0x10, 0x01, 0x47, 0xFF);
- mipi_dsi_dcs_write_seq(dsi, 0xB2, 0x0C, 0x14, 0x04, 0x50, 0x50, 0x14);
- mipi_dsi_dcs_write_seq(dsi, 0xB3, 0x56, 0x53, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xB4, 0x33, 0x30, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xB6, 0xB0, 0x00, 0x00, 0x10, 0x00, 0x10,
- 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xB8, 0x05, 0x12, 0x29, 0x49, 0x48, 0x00,
- 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xB9, 0x7C, 0x65, 0x55, 0x49, 0x46, 0x36,
- 0x3B, 0x24, 0x3D, 0x3C, 0x3D, 0x5C, 0x4C,
- 0x55, 0x47, 0x46, 0x39, 0x26, 0x06, 0x7C,
- 0x65, 0x55, 0x49, 0x46, 0x36, 0x3B, 0x24,
- 0x3D, 0x3C, 0x3D, 0x5C, 0x4C, 0x55, 0x47,
- 0x46, 0x39, 0x26, 0x06);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0xFF, 0x87, 0x12, 0x34, 0x44, 0x44,
- 0x44, 0x44, 0x98, 0x04, 0x98, 0x04, 0x0F,
- 0x00, 0x00, 0xC1);
- mipi_dsi_dcs_write_seq(dsi, 0xC1, 0x54, 0x94, 0x02, 0x85, 0x9F, 0x00,
- 0x7F, 0x00, 0x54, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xC2, 0x17, 0x09, 0x08, 0x89, 0x08, 0x11,
- 0x22, 0x20, 0x44, 0xFF, 0x18, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xC3, 0x86, 0x46, 0x05, 0x05, 0x1C, 0x1C,
- 0x1D, 0x1D, 0x02, 0x1F, 0x1F, 0x1E, 0x1E,
- 0x0F, 0x0F, 0x0D, 0x0D, 0x13, 0x13, 0x11,
- 0x11, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xC4, 0x07, 0x07, 0x04, 0x04, 0x1C, 0x1C,
- 0x1D, 0x1D, 0x02, 0x1F, 0x1F, 0x1E, 0x1E,
- 0x0E, 0x0E, 0x0C, 0x0C, 0x12, 0x12, 0x10,
- 0x10, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xC6, 0x2A, 0x2A);
- mipi_dsi_dcs_write_seq(dsi, 0xC8, 0x21, 0x00, 0x31, 0x42, 0x34, 0x16);
- mipi_dsi_dcs_write_seq(dsi, 0xCA, 0xCB, 0x43);
- mipi_dsi_dcs_write_seq(dsi, 0xCD, 0x0E, 0x4B, 0x4B, 0x20, 0x19, 0x6B,
- 0x06, 0xB3);
- mipi_dsi_dcs_write_seq(dsi, 0xD2, 0xE3, 0x2B, 0x38, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xD4, 0x00, 0x01, 0x00, 0x0E, 0x04, 0x44,
- 0x08, 0x10, 0x00, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xE6, 0x80, 0x01, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF);
- mipi_dsi_dcs_write_seq(dsi, 0xF0, 0x12, 0x03, 0x20, 0x00, 0xFF);
- mipi_dsi_dcs_write_seq(dsi, 0xF3, 0x00);
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
-
- msleep(120);
-
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set panel on: %d\n", ret);
- return ret;
- }
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0xab, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0xba, 0xab);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb1, 0x10, 0x01, 0x47, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x0c, 0x14, 0x04, 0x50, 0x50, 0x14);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb3, 0x56, 0x53, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb4, 0x33, 0x30, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0xb0, 0x00, 0x00, 0x10, 0x00, 0x10,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb8, 0x05, 0x12, 0x29, 0x49, 0x48, 0x00,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x7c, 0x65, 0x55, 0x49, 0x46, 0x36,
+ 0x3b, 0x24, 0x3d, 0x3c, 0x3d, 0x5c, 0x4c,
+ 0x55, 0x47, 0x46, 0x39, 0x26, 0x06, 0x7c,
+ 0x65, 0x55, 0x49, 0x46, 0x36, 0x3b, 0x24,
+ 0x3d, 0x3c, 0x3d, 0x5c, 0x4c, 0x55, 0x47,
+ 0x46, 0x39, 0x26, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0xff, 0x87, 0x12, 0x34, 0x44, 0x44,
+ 0x44, 0x44, 0x98, 0x04, 0x98, 0x04, 0x0f,
+ 0x00, 0x00, 0xc1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc1, 0x54, 0x94, 0x02, 0x85, 0x9f, 0x00,
+ 0x7f, 0x00, 0x54, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc2, 0x17, 0x09, 0x08, 0x89, 0x08, 0x11,
+ 0x22, 0x20, 0x44, 0xff, 0x18, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc3, 0x86, 0x46, 0x05, 0x05, 0x1c, 0x1c,
+ 0x1d, 0x1d, 0x02, 0x1f, 0x1f, 0x1e, 0x1e,
+ 0x0f, 0x0f, 0x0d, 0x0d, 0x13, 0x13, 0x11,
+ 0x11, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc4, 0x07, 0x07, 0x04, 0x04, 0x1c, 0x1c,
+ 0x1d, 0x1d, 0x02, 0x1f, 0x1f, 0x1e, 0x1e,
+ 0x0e, 0x0e, 0x0c, 0x0c, 0x12, 0x12, 0x10,
+ 0x10, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc6, 0x2a, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc8, 0x21, 0x00, 0x31, 0x42, 0x34, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xca, 0xcb, 0x43);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcd, 0x0e, 0x4b, 0x4b, 0x20, 0x19, 0x6b,
+ 0x06, 0xb3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd2, 0xe3, 0x2b, 0x38, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd4, 0x00, 0x01, 0x00, 0x0e, 0x04, 0x44,
+ 0x08, 0x10, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe6, 0x80, 0x01, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x12, 0x03, 0x20, 0x00, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0x00);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
+ return dsi_ctx.accum_err;
+}
- return 0;
+static int starry_er88577_init_cmd(struct boe_th101mb31ig002 *ctx)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ msleep(70);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0xab, 0xba);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0xba, 0xab);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb1, 0x10, 0x01, 0x47, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x0c, 0x14, 0x04, 0x50, 0x50, 0x14);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb3, 0x56, 0x53, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb4, 0x33, 0x30, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0xb0, 0x00, 0x00, 0x10, 0x00, 0x10,
+ 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb8, 0x05, 0x12, 0x29, 0x49, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x7c, 0x61, 0x4f, 0x42, 0x3e, 0x2d,
+ 0x31, 0x1a, 0x33, 0x33, 0x33, 0x52, 0x40,
+ 0x47, 0x38, 0x34, 0x26, 0x0e, 0x06, 0x7c,
+ 0x61, 0x4f, 0x42, 0x3e, 0x2d, 0x31, 0x1a,
+ 0x33, 0x33, 0x33, 0x52, 0x40, 0x47, 0x38,
+ 0x34, 0x26, 0x0e, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc0, 0xcc, 0x76, 0x12, 0x34, 0x44, 0x44,
+ 0x44, 0x44, 0x98, 0x04, 0x98, 0x04, 0x0f,
+ 0x00, 0x00, 0xc1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc1, 0x54, 0x94, 0x02, 0x85, 0x9f, 0x00,
+ 0x6f, 0x00, 0x54, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc2, 0x17, 0x09, 0x08, 0x89, 0x08, 0x11,
+ 0x22, 0x20, 0x44, 0xff, 0x18, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc3, 0x87, 0x47, 0x05, 0x05, 0x1c, 0x1c,
+ 0x1d, 0x1d, 0x02, 0x1e, 0x1e, 0x1f, 0x1f,
+ 0x0f, 0x0f, 0x0d, 0x0d, 0x13, 0x13, 0x11,
+ 0x11, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc4, 0x06, 0x06, 0x04, 0x04, 0x1c, 0x1c,
+ 0x1d, 0x1d, 0x02, 0x1e, 0x1e, 0x1f, 0x1f,
+ 0x0e, 0x0e, 0x0c, 0x0c, 0x12, 0x12, 0x10,
+ 0x10, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc8, 0x21, 0x00, 0x31, 0x42, 0x34, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xca, 0xcb, 0x43);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcd, 0x0e, 0x4b, 0x4b, 0x20, 0x19, 0x6b,
+ 0x06, 0xb3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd1, 0x40, 0x0d, 0xff, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd2, 0xe3, 0x2b, 0x38, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd3, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x33, 0x20, 0x3a, 0xd5, 0x86, 0xf3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd4, 0x00, 0x01, 0x00, 0x0e, 0x04, 0x44,
+ 0x08, 0x10, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe6, 0x80, 0x09, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x12, 0x03, 0x20, 0x00, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0x00);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ return dsi_ctx.accum_err;
}
static int boe_th101mb31ig002_disable(struct drm_panel *panel)
@@ -114,21 +182,21 @@ static int boe_th101mb31ig002_disable(struct drm_panel *panel)
struct boe_th101mb31ig002 *ctx = container_of(panel,
struct boe_th101mb31ig002,
panel);
- struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0)
- dev_err(dev, "Failed to set panel off: %d\n", ret);
+ if (ctx->desc->backlight_off_to_display_off_delay_ms)
+ mipi_dsi_msleep(&dsi_ctx, ctx->desc->backlight_off_to_display_off_delay_ms);
- msleep(120);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0)
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- return 0;
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+
+ if (ctx->desc->enter_sleep_to_reset_down_delay_ms)
+ mipi_dsi_msleep(&dsi_ctx, ctx->desc->enter_sleep_to_reset_down_delay_ms);
+
+ return dsi_ctx.accum_err;
}
static int boe_th101mb31ig002_unprepare(struct drm_panel *panel)
@@ -141,6 +209,9 @@ static int boe_th101mb31ig002_unprepare(struct drm_panel *panel)
gpiod_set_value_cansleep(ctx->enable, 0);
regulator_disable(ctx->power);
+ if (ctx->desc->power_off_delay_ms)
+ msleep(ctx->desc->power_off_delay_ms);
+
return 0;
}
@@ -158,10 +229,25 @@ static int boe_th101mb31ig002_prepare(struct drm_panel *panel)
return ret;
}
+ if (ctx->desc->vcioo_to_lp11_delay_ms)
+ msleep(ctx->desc->vcioo_to_lp11_delay_ms);
+
+ if (ctx->desc->lp11_before_reset) {
+ ret = mipi_dsi_dcs_nop(ctx->dsi);
+ if (ret)
+ return ret;
+ }
+
+ if (ctx->desc->lp11_to_reset_delay_ms)
+ msleep(ctx->desc->lp11_to_reset_delay_ms);
+
gpiod_set_value_cansleep(ctx->enable, 1);
msleep(50);
boe_th101mb31ig002_reset(ctx);
- boe_th101mb31ig002_enable(panel);
+
+ ret = ctx->desc->init(ctx);
+ if (ret)
+ return ret;
return 0;
}
@@ -181,39 +267,62 @@ static const struct drm_display_mode boe_th101mb31ig002_default_mode = {
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
+static const struct panel_desc boe_th101mb31ig002_desc = {
+ .modes = &boe_th101mb31ig002_default_mode,
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_NO_EOT_PACKET |
+ MIPI_DSI_MODE_LPM,
+ .init = boe_th101mb31ig002_enable,
+};
+
+static const struct drm_display_mode starry_er88577_default_mode = {
+ .clock = (800 + 25 + 25 + 25) * (1280 + 20 + 4 + 12) * 60 / 1000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 25,
+ .hsync_end = 800 + 25 + 25,
+ .htotal = 800 + 25 + 25 + 25,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 20,
+ .vsync_end = 1280 + 20 + 4,
+ .vtotal = 1280 + 20 + 4 + 12,
+ .width_mm = 135,
+ .height_mm = 216,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc starry_er88577_desc = {
+ .modes = &starry_er88577_default_mode,
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init = starry_er88577_init_cmd,
+ .lp11_before_reset = true,
+ .vcioo_to_lp11_delay_ms = 5,
+ .lp11_to_reset_delay_ms = 50,
+ .backlight_off_to_display_off_delay_ms = 100,
+ .enter_sleep_to_reset_down_delay_ms = 100,
+ .power_off_delay_ms = 1000,
+};
+
static int boe_th101mb31ig002_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct boe_th101mb31ig002 *ctx = container_of(panel,
struct boe_th101mb31ig002,
panel);
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev,
- &boe_th101mb31ig002_default_mode);
- if (!mode) {
- dev_err(panel->dev, "Failed to add mode %ux%u@%u\n",
- boe_th101mb31ig002_default_mode.hdisplay,
- boe_th101mb31ig002_default_mode.vdisplay,
- drm_mode_vrefresh(&boe_th101mb31ig002_default_mode));
- return -ENOMEM;
- }
-
- drm_mode_set_name(mode);
+ const struct drm_display_mode *desc_mode = ctx->desc->modes;
connector->display_info.bpc = 8;
- connector->display_info.width_mm = mode->width_mm;
- connector->display_info.height_mm = mode->height_mm;
-
/*
* TODO: Remove once all drm drivers call
* drm_connector_set_orientation_from_panel()
*/
drm_connector_set_panel_orientation(connector, ctx->orientation);
- drm_mode_probed_add(connector, mode);
-
- return 1;
+ return drm_connector_helper_get_modes_fixed(connector, desc_mode);
}
static enum drm_panel_orientation
@@ -237,6 +346,7 @@ static const struct drm_panel_funcs boe_th101mb31ig002_funcs = {
static int boe_th101mb31ig002_dsi_probe(struct mipi_dsi_device *dsi)
{
struct boe_th101mb31ig002 *ctx;
+ const struct panel_desc *desc;
int ret;
ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
@@ -246,11 +356,11 @@ static int boe_th101mb31ig002_dsi_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- dsi->lanes = 4;
- dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
- MIPI_DSI_MODE_NO_EOT_PACKET |
- MIPI_DSI_MODE_LPM;
+ desc = of_device_get_match_data(&dsi->dev);
+ dsi->lanes = desc->lanes;
+ dsi->format = desc->format;
+ dsi->mode_flags = desc->mode_flags;
+ ctx->desc = desc;
ctx->power = devm_regulator_get(&dsi->dev, "power");
if (IS_ERR(ctx->power))
@@ -262,7 +372,7 @@ static int boe_th101mb31ig002_dsi_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(&dsi->dev, PTR_ERR(ctx->enable),
"Failed to get enable GPIO\n");
- ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_HIGH);
+ ctx->reset = devm_gpiod_get_optional(&dsi->dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset))
return dev_err_probe(&dsi->dev, PTR_ERR(ctx->reset),
"Failed to get reset GPIO\n");
@@ -302,7 +412,14 @@ static void boe_th101mb31ig002_dsi_remove(struct mipi_dsi_device *dsi)
}
static const struct of_device_id boe_th101mb31ig002_of_match[] = {
- { .compatible = "boe,th101mb31ig002-28a", },
+ {
+ .compatible = "boe,th101mb31ig002-28a",
+ .data = &boe_th101mb31ig002_desc
+ },
+ {
+ .compatible = "starry,er88577",
+ .data = &starry_er88577_desc
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, boe_th101mb31ig002_of_match);
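
The conversion above moves the init, disable and prepare paths onto the mipi_dsi_multi_context helpers and hangs the per-variant mode, timings and init callback off a panel_desc looked up with of_device_get_match_data(). The multi-context helpers record the first failure in dsi_ctx.accum_err and turn every later call into a no-op, which is why the long command sequences can end with a single "return dsi_ctx.accum_err" instead of a check after each write. A minimal sketch of that error-accumulation pattern (example_panel_init and the 0xb0 write are placeholders, not this panel's sequence):

#include <drm/drm_mipi_dsi.h>

/*
 * Minimal sketch of the accumulating-error pattern; the 0xb0 write is a
 * placeholder, not a real init command for this panel. Once accum_err is
 * set, every subsequent *_multi() helper (including the sleep) is skipped,
 * so a single check at the end covers the whole sequence.
 */
static int example_panel_init(struct mipi_dsi_device *dsi)
{
	struct mipi_dsi_multi_context ctx = { .dsi = dsi };

	mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x00);
	mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
	mipi_dsi_msleep(&ctx, 120);
	mipi_dsi_dcs_set_display_on_multi(&ctx);

	return ctx.accum_err;
}
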
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index ce919a980875..3e5b0d8636d0 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -54,12 +54,22 @@ struct boe_panel {
struct gpio_desc *enable_gpio;
};
+#define NT36523_DCS_SWITCH_PAGE 0xff
+
+#define nt36523_switch_page(ctx, page) \
+ mipi_dsi_dcs_write_seq_multi(ctx, NT36523_DCS_SWITCH_PAGE, (page))
+
+static void nt36523_enable_reload_cmds(struct mipi_dsi_multi_context *ctx)
+{
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+}
+
static int boe_tv110c9m_init(struct boe_panel *boe)
{
struct mipi_dsi_multi_context ctx = { .dsi = boe->dsi };
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x20);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x20);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x05, 0xd9);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x07, 0x78);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x08, 0x5a);
@@ -99,16 +109,14 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0xbb, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7,
0x03, 0xfd, 0x03, 0xff);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x21);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
-
+ nt36523_switch_page(&ctx, 0x21);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65,
0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb1, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76,
0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb2, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00,
0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb3, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf,
0x03, 0xf5, 0x03, 0xe0);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb4, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65,
@@ -119,89 +127,66 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb7, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf,
0x03, 0xf5, 0x03, 0xe0);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb8, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65,
0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb9, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76,
0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xba, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00,
0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xbb, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf,
0x03, 0xf5, 0x03, 0xe0);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x24);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x24);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x00, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x01, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x02, 0x1c);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x03, 0x1c);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x04, 0x1d);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x05, 0x1d);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x06, 0x04);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x07, 0x04);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x08, 0x0f);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x09, 0x0f);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0a, 0x0e);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0b, 0x0e);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0c, 0x0d);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0d, 0x0d);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0e, 0x0c);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0f, 0x0c);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x10, 0x08);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x11, 0x08);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x12, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x13, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x14, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x15, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x16, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x17, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x18, 0x1c);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x19, 0x1c);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1a, 0x1d);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1b, 0x1d);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1c, 0x04);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1d, 0x04);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1e, 0x0f);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1f, 0x0f);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x20, 0x0e);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x21, 0x0e);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x22, 0x0d);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x23, 0x0d);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x24, 0x0c);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x25, 0x0c);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x26, 0x08);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x27, 0x08);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x28, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x29, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2a, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2b, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2d, 0x20);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2f, 0x0a);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x30, 0x44);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x33, 0x0c);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x34, 0x32);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x37, 0x44);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x38, 0x40);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x39, 0x00);
@@ -244,7 +229,6 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0xdb, 0x05);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xdc, 0xa9);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xdd, 0x22);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xdf, 0x05);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xe0, 0xa9);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xe1, 0x05);
@@ -258,8 +242,9 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x8d, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x8e, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb5, 0x90);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x25);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+
+ nt36523_switch_page(&ctx, 0x25);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x05, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x19, 0x07);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1f, 0x60);
@@ -281,26 +266,22 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x61, 0x60);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x62, 0x50);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xf1, 0x10);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x2a);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x64, 0x16);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x67, 0x16);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x6a, 0x16);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x70, 0x30);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa2, 0xf3);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa3, 0xff);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa4, 0xff);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa5, 0xff);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xd6, 0x08);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x26);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x26);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x00, 0xa1);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x02, 0x31);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x04, 0x28);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x06, 0x30);
@@ -323,7 +304,6 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x23, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2a, 0x0d);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2b, 0x7f);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1d, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1e, 0x65);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1f, 0x65);
@@ -343,7 +323,6 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc9, 0x9e);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xca, 0x4e);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xcb, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa9, 0x49);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xaa, 0x4b);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xab, 0x48);
@@ -373,9 +352,9 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc3, 0x4f);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc4, 0x3a);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc5, 0x42);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x27);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x27);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x56, 0x06);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x58, 0x80);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x59, 0x75);
@@ -394,17 +373,14 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x66, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x67, 0x01);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x68, 0x44);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x00, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x78, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc3, 0x00);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
-
+ nt36523_switch_page(&ctx, 0x2a);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x22, 0x2f);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x23, 0x08);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x24, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x25, 0x65);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x26, 0xf8);
@@ -415,30 +391,30 @@ static int boe_tv110c9m_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2b, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2d, 0x1a);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x23);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
-
+ nt36523_switch_page(&ctx, 0x23);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x00, 0x80);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x07, 0x00);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0xe0);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0xe0);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x14, 0x60);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x16, 0xc0);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0xf0);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0xf0);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3a, 0x08);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x10);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x10);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb9, 0x01);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x20);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+
+ nt36523_switch_page(&ctx, 0x20);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x18, 0x40);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x10);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x10);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb9, 0x02);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x35, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x51, 0x00, 0xff);
@@ -464,13 +440,12 @@ static int inx_hj110iz_init(struct boe_panel *boe)
{
struct mipi_dsi_multi_context ctx = { .dsi = boe->dsi };
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x20);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x20);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x05, 0xd1);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x06, 0xc0);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x07, 0x87);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x08, 0x4b);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0d, 0x63);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0e, 0x91);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0f, 0x69);
@@ -482,10 +457,10 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x69, 0x98);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x75, 0xa2);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x77, 0xb3);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x58, 0x43);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x24);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+
+ nt36523_switch_page(&ctx, 0x24);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x91, 0x44);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x92, 0x4c);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x94, 0x86);
@@ -493,7 +468,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x61, 0xd0);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x63, 0x70);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc2, 0xca);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x00, 0x03);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x01, 0x03);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x02, 0x03);
@@ -538,7 +512,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x29, 0x04);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2a, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2b, 0x03);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2f, 0x0a);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x30, 0x35);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x37, 0xa7);
@@ -546,7 +519,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3a, 0x46);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3b, 0x32);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3d, 0x12);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3f, 0x33);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x40, 0x31);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x41, 0x40);
@@ -556,7 +528,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x4a, 0x45);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x4b, 0x45);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x4c, 0x14);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x4d, 0x21);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x4e, 0x43);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x4f, 0x65);
@@ -569,7 +540,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5c, 0x88);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5e, 0x00, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5f, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x7a, 0xff);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x7b, 0xff);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x7c, 0x00);
@@ -581,7 +551,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x82, 0x08);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x97, 0x02);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc5, 0x10);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xd7, 0x55);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xd8, 0x55);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xd9, 0x23);
@@ -609,43 +578,32 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb6, 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00,
0x05, 0x05, 0x00, 0x00);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x25);
-
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x25);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x05, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xf1, 0x10);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1e, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1f, 0x46);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x20, 0x32);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x25, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x26, 0x46);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x27, 0x32);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3f, 0x80);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x40, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x43, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x44, 0x46);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x45, 0x46);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x48, 0x46);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x49, 0x32);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5b, 0x80);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5c, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5d, 0x46);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5e, 0x32);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5f, 0x46);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x60, 0x32);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x61, 0x46);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x62, 0x32);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x68, 0x0c);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x6c, 0x0d);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x6e, 0x0d);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x78, 0x00);
@@ -653,9 +611,8 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x7a, 0x0c);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x7b, 0xb0);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x26);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
-
+ nt36523_switch_page(&ctx, 0x26);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x00, 0xa1);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x02, 0x31);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x0a, 0xf4);
@@ -674,18 +631,15 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x18, 0x86);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x22, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x23, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x19, 0x0e);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1a, 0x31);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1b, 0x0d);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1c, 0x29);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2a, 0x0e);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2b, 0x31);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1d, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1e, 0x62);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1f, 0x62);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2f, 0x06);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x30, 0x62);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x31, 0x06);
@@ -693,11 +647,9 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x33, 0x11);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x34, 0x89);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x35, 0x67);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x39, 0x0b);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3a, 0x62);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3b, 0x06);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc8, 0x04);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc9, 0x89);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xca, 0x4e);
@@ -711,21 +663,18 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0xaf, 0x39);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x38);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x27);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
-
+ nt36523_switch_page(&ctx, 0x27);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xd0, 0x11);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xd1, 0x54);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xde, 0x43);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xdf, 0x02);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc0, 0x18);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc1, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc2, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x00, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xc3, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x56, 0x06);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x58, 0x80);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x59, 0x78);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x5a, 0x00);
@@ -743,20 +692,17 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x66, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x67, 0x01);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x68, 0x44);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x98, 0x01);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb4, 0x03);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x9b, 0xbe);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xab, 0x14);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xbc, 0x08);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xbd, 0x28);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x2a);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x22, 0x2f);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x23, 0x08);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x24, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x25, 0x62);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x26, 0xf8);
@@ -766,7 +712,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2a, 0x1a);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2b, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x2d, 0x1a);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x64, 0x96);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x65, 0x10);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x66, 0x00);
@@ -783,14 +728,11 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x7a, 0x10);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x88, 0x96);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x89, 0x10);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa2, 0x3f);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa3, 0x30);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa4, 0xc0);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xa5, 0x03);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xe8, 0x00);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0x97, 0x3c);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x98, 0x02);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x99, 0x95);
@@ -800,7 +742,7 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x9d, 0x0a);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x9e, 0x90);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x25);
+ nt36523_switch_page(&ctx, 0x25);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x13, 0x02);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x14, 0xd7);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xdb, 0x02);
@@ -809,8 +751,7 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0x19, 0x0f);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x1b, 0x5b);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x20);
-
+ nt36523_switch_page(&ctx, 0x20);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x24, 0x00, 0x38,
0x00, 0x4c, 0x00, 0x5e, 0x00, 0x6f, 0x00, 0x7e);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb1, 0x00, 0x8c, 0x00, 0xbe, 0x00, 0xe5, 0x01, 0x27,
@@ -819,7 +760,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
0x03, 0x00, 0x03, 0x31, 0x03, 0x40, 0x03, 0x51);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb3, 0x03, 0x62, 0x03, 0x75, 0x03, 0x89, 0x03, 0x9c,
0x03, 0xaa, 0x03, 0xb2);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb4, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x27, 0x00, 0x3d,
0x00, 0x52, 0x00, 0x64, 0x00, 0x75, 0x00, 0x84);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb5, 0x00, 0x93, 0x00, 0xc5, 0x00, 0xec, 0x01, 0x2c,
@@ -828,7 +768,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
0x03, 0x01, 0x03, 0x31, 0x03, 0x41, 0x03, 0x51);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb7, 0x03, 0x63, 0x03, 0x75, 0x03, 0x89, 0x03, 0x9c,
0x03, 0xaa, 0x03, 0xb2);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb8, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x2a, 0x00, 0x40,
0x00, 0x56, 0x00, 0x68, 0x00, 0x7a, 0x00, 0x89);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb9, 0x00, 0x98, 0x00, 0xc9, 0x00, 0xf1, 0x01, 0x30,
@@ -838,7 +777,7 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0xbb, 0x03, 0x66, 0x03, 0x75, 0x03, 0x89, 0x03, 0x9c,
0x03, 0xaa, 0x03, 0xb2);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x21);
+ nt36523_switch_page(&ctx, 0x21);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x24, 0x00, 0x38,
0x00, 0x4c, 0x00, 0x5e, 0x00, 0x6f, 0x00, 0x7e);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb1, 0x00, 0x8c, 0x00, 0xbe, 0x00, 0xe5, 0x01, 0x27,
@@ -847,7 +786,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
0x03, 0x00, 0x03, 0x31, 0x03, 0x40, 0x03, 0x51);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb3, 0x03, 0x62, 0x03, 0x77, 0x03, 0x90, 0x03, 0xac,
0x03, 0xca, 0x03, 0xda);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb4, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x27, 0x00, 0x3d,
0x00, 0x52, 0x00, 0x64, 0x00, 0x75, 0x00, 0x84);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb5, 0x00, 0x93, 0x00, 0xc5, 0x00, 0xec, 0x01, 0x2c,
@@ -856,7 +794,6 @@ static int inx_hj110iz_init(struct boe_panel *boe)
0x03, 0x01, 0x03, 0x31, 0x03, 0x41, 0x03, 0x51);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb7, 0x03, 0x63, 0x03, 0x77, 0x03, 0x90, 0x03, 0xac,
0x03, 0xca, 0x03, 0xda);
-
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb8, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x2a, 0x00, 0x40,
0x00, 0x56, 0x00, 0x68, 0x00, 0x7a, 0x00, 0x89);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb9, 0x00, 0x98, 0x00, 0xc9, 0x00, 0xf1, 0x01, 0x30,
@@ -866,22 +803,21 @@ static int inx_hj110iz_init(struct boe_panel *boe)
mipi_dsi_dcs_write_seq_multi(&ctx, 0xbb, 0x03, 0x66, 0x03, 0x77, 0x03, 0x90, 0x03, 0xac,
0x03, 0xca, 0x03, 0xda);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0xf0);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0xf0);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3a, 0x08);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x10);
+ nt36523_switch_page(&ctx, 0x10);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb9, 0x01);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x20);
-
+ nt36523_switch_page(&ctx, 0x20);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x18, 0x40);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x10);
+ nt36523_switch_page(&ctx, 0x10);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb9, 0x02);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x10);
- mipi_dsi_dcs_write_seq_multi(&ctx, 0xfb, 0x01);
+ nt36523_switch_page(&ctx, 0x10);
+ nt36523_enable_reload_cmds(&ctx);
mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x01);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x35, 0x00);
mipi_dsi_dcs_write_seq_multi(&ctx, 0x3b, 0x03, 0xae, 0x1a, 0x04, 0x04);
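The hunks above replace the raw 0xff page-select and 0xfb reload-enable writes with named helpers defined earlier in the patch. A minimal sketch of their likely shape (the defining hunk is not shown in this excerpt, so the exact definitions are assumed here):

	#define NT36523_DCS_SWITCH_PAGE		0xff

	#define nt36523_switch_page(ctx, page) \
		mipi_dsi_dcs_write_seq_multi(ctx, NT36523_DCS_SWITCH_PAGE, (page))

	static void nt36523_enable_reload_cmds(struct mipi_dsi_multi_context *ctx)
	{
		/* per the helper's name, 0xfb/0x01 re-enables command reload after a page switch */
		mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
	}

With the helpers, a page switch reads as intent instead of a magic register write, which helps in init tables that run to hundreds of lines.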
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 3a574a9b46e7..7183df267777 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -954,16 +954,24 @@ static void panel_edp_shutdown(struct device *dev)
* drm_atomic_helper_shutdown() at shutdown time and that should
* cause the panel to be disabled / unprepared if needed. For now,
* however, we'll keep these calls due to the sheer number of
- * different DRM modeset drivers used with panel-edp. The fact that
- * we're calling these and _also_ the drm_atomic_helper_shutdown()
- * will try to disable/unprepare means that we can get a warning about
- * trying to disable/unprepare an already disabled/unprepared panel,
- * but that's something we'll have to live with until we've confirmed
- * that all DRM modeset drivers are properly calling
- * drm_atomic_helper_shutdown().
+ * different DRM modeset drivers used with panel-edp. Once we've
+ * confirmed that all DRM modeset drivers using this panel properly
+ * call drm_atomic_helper_shutdown() we can simply delete the two
+ * calls below.
+ *
+ * TO BE EXPLICIT: THE CALLS BELOW SHOULDN'T BE COPIED TO ANY NEW
+ * PANEL DRIVERS.
+ *
+	 * FIXME: If we still haven't figured out whether all DRM modeset
+ * drivers properly call drm_atomic_helper_shutdown() but we _have_
+ * managed to make sure that DRM modeset drivers get their shutdown()
+ * callback before the panel's shutdown() callback (perhaps using
+ * device link), we could add a WARN_ON here to help move forward.
*/
- drm_panel_disable(&panel->base);
- drm_panel_unprepare(&panel->base);
+ if (panel->base.enabled)
+ drm_panel_disable(&panel->base);
+ if (panel->base.prepared)
+ drm_panel_unprepare(&panel->base);
}
static void panel_edp_remove(struct device *dev)
@@ -1845,7 +1853,10 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x635c, &delay_200_500_e50, "B116XAN06.3"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x639c, &delay_200_500_e50, "B140HAK02.7"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x723c, &delay_200_500_e50, "B140XTN07.2"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x73aa, &delay_200_500_e50, "B116XTN02.3"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0xa199, &delay_200_500_e50, "B116XAN06.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0xc4b4, &delay_200_500_e50, "B116XAT04.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xd497, &delay_200_500_e50, "B120XAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xf390, &delay_200_500_e50, "B140XTN07.7"),
@@ -1891,15 +1902,18 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x09ad, &delay_200_500_e80, "NV116WHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x09ae, &delay_200_500_e200, "NT140FHM-N45"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x09dd, &delay_200_500_e50, "NT116WHM-N21"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a1b, &delay_200_500_e50, "NV133WUM-N63"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a36, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a3e, &delay_200_500_e80, "NV116WHM-N49"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ae8, &delay_200_500_e50_p2e80, "NV140WUM-N41"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b34, &delay_200_500_e80, "NV122WUM-N41"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b43, &delay_200_500_e200, "NV140FHM-T09"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c20, &delay_200_500_e80, "NT140FHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cb6, &delay_200_500_e200, "NT116WHM-N44"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cfa, &delay_200_500_e50, "NV116WHM-A4D"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1130, &delay_200_500_e50, "N116BGE-EB2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1132, &delay_200_500_e80_d50, "N116BGE-EA2"),
@@ -1915,8 +1929,10 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1156, &delay_200_500_e80_d50, "Unknown"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1157, &delay_200_500_e80_d50, "N116BGE-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x115b, &delay_200_500_e80_d50, "N116BCN-EB1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x115d, &delay_200_500_e80_d50, "N116BCA-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x115e, &delay_200_500_e80_d50, "N116BCA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1160, &delay_200_500_e80_d50, "N116BCJ-EAK"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1161, &delay_200_500_e80, "N116BCP-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142e, &delay_200_500_e80_d50, "N140BGA-EA4"),
@@ -1929,9 +1945,10 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_p2e200, "MNC207QS1-1"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1100, &delay_200_500_e80_d50, "MNB601LS1-1"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1104, &delay_200_500_e50, "MNB601LS1-4"),
EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d51, &delay_200_500_e200, "Unknown"),
- EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5b, &delay_200_500_e200, "Unknown"),
+ EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5b, &delay_200_500_e200, "MB116AN01"),
EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5c, &delay_200_500_e200, "MB116AN01-2"),
EDP_PANEL_ENTRY('I', 'V', 'O', 0x048e, &delay_200_500_e200_d10, "M116NWR6 R5"),
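The shutdown change above only calls disable/unprepare when the panel is still flagged as enabled/prepared, avoiding double-teardown warnings when drm_atomic_helper_shutdown() has already run. A minimal sketch of the same guard in a hypothetical legacy panel driver (foo_panel and its fields are illustrative, not from this patch):

	struct foo_panel {
		struct drm_panel base;
	};

	static void foo_panel_shutdown(struct device *dev)
	{
		struct foo_panel *p = dev_get_drvdata(dev);

		/* skip the fallback when drm_atomic_helper_shutdown() already ran */
		if (p->base.enabled)
			drm_panel_disable(&p->base);
		if (p->base.prepared)
			drm_panel_unprepare(&p->base);
	}

As the comment in the hunk stresses, new panel drivers should not need this fallback at all.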
diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c
index cb9f46e853de..92b03a2f65a3 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx8394.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c
@@ -339,6 +339,156 @@ static const struct hx8394_panel_desc powkiddy_x55_desc = {
.init_sequence = powkiddy_x55_init_sequence,
};
+static int mchp_ac40t08a_init_sequence(struct hx8394 *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+	/* DCS commands do not seem to be sent correctly without this delay */
+ msleep(20);
+
+ /* 5.19.8 SETEXTC: Set extension command (B9h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x94);
+
+ /* 5.19.9 SETMIPI: Set MIPI control (BAh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI,
+ 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
+
+ /* 5.19.2 SETPOWER: Set power (B1h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
+ 0x48, 0x12, 0x72, 0x09, 0x32, 0x54,
+ 0x71, 0x71, 0x57, 0x47);
+
+ /* 5.19.3 SETDISP: Set display related register (B2h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x64, 0x0c, 0x0d, 0x2f);
+
+ /* 5.19.4 SETCYC: Set display waveform cycles (B4h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC,
+ 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
+ 0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f,
+ 0x73, 0x74, 0x73, 0x74, 0x73, 0x74,
+ 0x01, 0x0c, 0x86);
+
+ /* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM,
+ 0x6e, 0x6e);
+
+ /* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x07, 0x07, 0x40, 0x07,
+ 0x0c, 0x00, 0x08, 0x10, 0x08, 0x00,
+ 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a,
+ 0x02, 0x15, 0x06, 0x05, 0x06, 0x47,
+ 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
+ 0x07, 0x0c, 0x40);
+
+ /* 5.19.20 Set GIP Option1 (D5h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25,
+ 0x18, 0x18, 0x26, 0x27, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x20, 0x21, 0x18, 0x18,
+ 0x18, 0x18);
+
+ /* 5.19.21 Set GIP Option2 (D6h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06,
+ 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
+ 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20,
+ 0x18, 0x18, 0x27, 0x26, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x25, 0x24, 0x18, 0x18,
+ 0x18, 0x18);
+
+ /* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA,
+ 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21,
+ 0x24, 0x22, 0x47, 0x56, 0x65, 0x66,
+ 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d,
+ 0x98, 0xa8, 0xb9, 0x5d, 0x5c, 0x61,
+ 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
+ 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24,
+ 0x22, 0x47, 0x56, 0x65, 0x65, 0x6e,
+ 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99,
+ 0xa8, 0xba, 0x5d, 0x5d, 0x62, 0x67,
+ 0x6b, 0x72, 0x7f, 0x7f);
+
+	/* Unknown command, not listed in the HX8394-F datasheet (C0h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1,
+ 0x1f, 0x73);
+
+	/* Set CABC control (C9h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCABC,
+ 0x76, 0x00, 0x30);
+
+ /* 5.19.17 SETPANEL (CCh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL,
+ 0x0b);
+
+ /* Unknown command, not listed in the HX8394-F datasheet (D4h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
+ 0x02);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x02);
+
+	/* Unknown command, not listed in the HX8394-F datasheet (D8h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN4,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x00);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x01);
+
+ /* 5.19.2 SETPOWER: Set power (B1h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
+ 0x00);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x00);
+
+ /* Unknown command, not listed in the HX8394-F datasheet (C6h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN2,
+ 0xed);
+
+ return 0;
+}
+
+static const struct drm_display_mode mchp_ac40t08a_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 12,
+ .hsync_end = 720 + 12 + 24,
+ .htotal = 720 + 12 + 12 + 24,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 13,
+ .vsync_end = 1280 + 14,
+ .vtotal = 1280 + 14 + 13,
+ .clock = 60226,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 76,
+ .height_mm = 132,
+};
+
+static const struct hx8394_panel_desc mchp_ac40t08a_desc = {
+ .mode = &mchp_ac40t08a_mode,
+ .lanes = 4,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init_sequence = mchp_ac40t08a_init_sequence,
+};
+
static int hx8394_enable(struct drm_panel *panel)
{
struct hx8394 *ctx = panel_to_hx8394(panel);
@@ -486,7 +636,7 @@ static int hx8394_probe(struct mipi_dsi_device *dsi)
if (!ctx)
return -ENOMEM;
- ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio))
return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
"Failed to get reset gpio\n");
@@ -555,6 +705,7 @@ static void hx8394_remove(struct mipi_dsi_device *dsi)
static const struct of_device_id hx8394_of_match[] = {
{ .compatible = "hannstar,hsd060bhw4", .data = &hsd060bhw4_desc },
{ .compatible = "powkiddy,x55-panel", .data = &powkiddy_x55_desc },
+ { .compatible = "microchip,ac40t08a-mipi-panel", .data = &mchp_ac40t08a_desc },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, hx8394_of_match);
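The switch to devm_gpiod_get_optional() in the probe hunk means a panel variant whose device tree omits reset-gpios gets a NULL descriptor instead of an error, and the gpiod setters treat NULL as a no-op, so the rest of the driver needs no extra checks. A short sketch of how that plays out (assuming the usual reset handling in this driver):

	/* ctx->reset_gpio may be NULL; gpiod_set_value_cansleep() then does nothing */
	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	usleep_range(10000, 20000);
	gpiod_set_value_cansleep(ctx->reset_gpio, 0);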
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c b/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c
index e4a44cd26c4d..a3c79ad99d0b 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c
@@ -380,7 +380,172 @@ static const struct panel_desc com35h3p70ulc_desc = {
.lanes = 2,
};
+static void dmt028vghmcmi_1d_init(struct mipi_dsi_multi_context *ctx)
+{
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0xff, 0x98, 0x06, 0x04, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x08, 0x10);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x21, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x30, 0x03);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x31, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x60, 0x06);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x61, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x62, 0x07);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x63, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x40, 0x16);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x41, 0x44);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x42, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x43, 0x83);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x44, 0x89);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x45, 0x8a);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x46, 0x44);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x47, 0x44);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x50, 0x78);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x51, 0x78);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x52, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x53, 0x6c);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x54, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x55, 0x6c);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x56, 0x00);
+ /* Gamma settings */
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa0, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa1, 0x09);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa2, 0x14);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa3, 0x09);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa4, 0x05);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa5, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa6, 0x07);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa7, 0x07);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa8, 0x08);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xa9, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xaa, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xab, 0x05);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xac, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xad, 0x19);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xae, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xaf, 0x00);
+
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc0, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc1, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc2, 0x14);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc3, 0x11);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc4, 0x05);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc5, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc6, 0x08);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc7, 0x03);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc8, 0x06);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xc9, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xca, 0x10);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xcb, 0x05);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xcc, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xcd, 0x15);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xce, 0x13);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xcf, 0x00);
+
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0xff, 0x98, 0x06, 0x04, 0x07);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x17, 0x22);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x18, 0x1d);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x02, 0x77);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xe1, 0x79);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x06, 0x13);
+
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0xff, 0x98, 0x06, 0x04, 0x06);
+ /* GIP 0 */
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x00, 0x21);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x01, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x02, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x03, 0x05);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x04, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x05, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x06, 0x98);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x07, 0x06);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x08, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x09, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x0a, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x0b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x0c, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x0d, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x0e, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x0f, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x10, 0xf7);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x11, 0xf0);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x12, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x13, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x14, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x15, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x16, 0x08);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x17, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x18, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x19, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x1a, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x1b, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x1c, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x1d, 0x00);
+ /* GIP 1 */
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x20, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x21, 0x23);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x22, 0x44);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x23, 0x67);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x24, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x25, 0x23);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x26, 0x45);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x27, 0x67);
+ /* GIP 2 */
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x30, 0x01);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x31, 0x22);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x32, 0x22);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x33, 0xbc);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x34, 0xad);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x35, 0xda);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x36, 0xcb);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x37, 0x22);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x38, 0x55);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x39, 0x76);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x3a, 0x67);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x3b, 0x88);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x3c, 0x22);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x3d, 0x11);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x3e, 0x00);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x3f, 0x22);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x40, 0x22);
+
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x52, 0x10);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x53, 0x10);
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x54, 0x13);
+
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0xff, 0x98, 0x06, 0x04, 0x00);
+};
+
+static const struct drm_display_mode dmt028vghmcmi_1d_default_mode = {
+ .clock = 22000,
+
+ .hdisplay = 480,
+ .hsync_start = 480 + 20,
+ .hsync_end = 480 + 20 + 4,
+ .htotal = 480 + 20 + 4 + 10,
+
+ .vdisplay = 640,
+ .vsync_start = 640 + 40,
+ .vsync_end = 640 + 40 + 4,
+ .vtotal = 640 + 40 + 4 + 20,
+
+ .width_mm = 53,
+ .height_mm = 79,
+
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc dmt028vghmcmi_1d_desc = {
+ .init_sequence = dmt028vghmcmi_1d_init,
+ .display_mode = &dmt028vghmcmi_1d_default_mode,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 2,
+};
+
static const struct of_device_id ili9806e_of_match[] = {
+ { .compatible = "densitron,dmt028vghmcmi-1d", .data = &dmt028vghmcmi_1d_desc },
{ .compatible = "ortustech,com35h3p70ulc", .data = &com35h3p70ulc_desc },
{ }
};
diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
index c6b669866fed..04d315d96bff 100644
--- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
@@ -48,6 +48,19 @@ struct jadard {
struct gpio_desc *reset;
};
+#define JD9365DA_DCS_SWITCH_PAGE 0xe0
+
+#define jd9365da_switch_page(dsi_ctx, page) \
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, JD9365DA_DCS_SWITCH_PAGE, (page))
+
+static void jadard_enable_standard_cmds(struct mipi_dsi_multi_context *dsi_ctx)
+{
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xe1, 0x93);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xe2, 0x65);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xe3, 0xf8);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0x80, 0x03);
+}
+
static inline struct jadard *panel_to_jadard(struct drm_panel *panel)
{
return container_of(panel, struct jadard, panel);
@@ -198,12 +211,10 @@ static int radxa_display_8hd_ad002_init_cmds(struct jadard *jadard)
{
struct mipi_dsi_multi_context dsi_ctx = { .dsi = jadard->dsi };
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x00);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE1, 0x93);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE2, 0x65);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE3, 0xF8);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0x03);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x01);
+ jd9365da_switch_page(&dsi_ctx, 0x00);
+ jadard_enable_standard_cmds(&dsi_ctx);
+
+ jd9365da_switch_page(&dsi_ctx, 0x01);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x00);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x7E);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0x00);
@@ -276,7 +287,8 @@ static int radxa_display_8hd_ad002_init_cmds(struct jadard *jadard)
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0x37);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x81, 0x23);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x82, 0x10);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x02);
+
+ jd9365da_switch_page(&dsi_ctx, 0x02);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x47);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x47);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x45);
@@ -360,13 +372,15 @@ static int radxa_display_8hd_ad002_init_cmds(struct jadard *jadard)
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7C, 0x00);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7D, 0x03);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7E, 0x7B);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x04);
+
+ jd9365da_switch_page(&dsi_ctx, 0x04);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x0E);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0xB3);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x60);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0E, 0x2A);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x36, 0x59);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x00);
+
+ jd9365da_switch_page(&dsi_ctx, 0x00);
return dsi_ctx.accum_err;
};
@@ -398,12 +412,10 @@ static int cz101b4001_init_cmds(struct jadard *jadard)
{
struct mipi_dsi_multi_context dsi_ctx = { .dsi = jadard->dsi };
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x00);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE1, 0x93);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE2, 0x65);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE3, 0xF8);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0x03);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x01);
+ jd9365da_switch_page(&dsi_ctx, 0x00);
+ jadard_enable_standard_cmds(&dsi_ctx);
+
+ jd9365da_switch_page(&dsi_ctx, 0x01);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x00);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x3B);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0C, 0x74);
@@ -471,7 +483,8 @@ static int cz101b4001_init_cmds(struct jadard *jadard)
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0x20);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x81, 0x0F);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x82, 0x00);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x02);
+
+ jd9365da_switch_page(&dsi_ctx, 0x02);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x02);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x02);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x00);
@@ -584,12 +597,14 @@ static int cz101b4001_init_cmds(struct jadard *jadard)
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7A, 0x17);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7D, 0x14);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7E, 0x82);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x04);
+
+ jd9365da_switch_page(&dsi_ctx, 0x04);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x0E);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0xB3);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x61);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0E, 0x48);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE0, 0x00);
+
+ jd9365da_switch_page(&dsi_ctx, 0x00);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE6, 0x02);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xE7, 0x0C);
@@ -623,12 +638,10 @@ static int kingdisplay_kd101ne3_init_cmds(struct jadard *jadard)
{
struct mipi_dsi_multi_context dsi_ctx = { .dsi = jadard->dsi };
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0x00);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0x93);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2, 0x65);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe3, 0xf8);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0x03);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0x01);
+ jd9365da_switch_page(&dsi_ctx, 0x00);
+ jadard_enable_standard_cmds(&dsi_ctx);
+
+ jd9365da_switch_page(&dsi_ctx, 0x01);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x74);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0x00);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0xc7);
@@ -694,7 +707,8 @@ static int kingdisplay_kd101ne3_init_cmds(struct jadard *jadard)
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0x26);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x81, 0x14);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x82, 0x02);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0x02);
+
+ jd9365da_switch_page(&dsi_ctx, 0x02);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x52);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x5f);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x5f);
@@ -808,12 +822,14 @@ static int kingdisplay_kd101ne3_init_cmds(struct jadard *jadard)
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x76, 0x00);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x77, 0x05);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x78, 0x2a);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0x04);
+
+ jd9365da_switch_page(&dsi_ctx, 0x04);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x0e);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0xb3);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x61);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0e, 0x48);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0x00);
+
+ jd9365da_switch_page(&dsi_ctx, 0x00);
return dsi_ctx.accum_err;
};
@@ -850,6 +866,249 @@ static const struct jadard_panel_desc kingdisplay_kd101ne3_40ti_desc = {
.enter_sleep_to_reset_down_delay_ms = 100,
};
+static int melfas_lmfbx101117480_init_cmds(struct jadard *jadard)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = jadard->dsi };
+
+ jd9365da_switch_page(&dsi_ctx, 0x00);
+ jadard_enable_standard_cmds(&dsi_ctx);
+
+ jd9365da_switch_page(&dsi_ctx, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x74);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0xbf);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0xbf);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x70);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x2d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x21, 0x2d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x7e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0xfe);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x37, 0x19);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x35, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x38, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x39, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3a, 0x12);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3c, 0x78);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3d, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3e, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3f, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x40, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x41, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x43, 0x1e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x44, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x74);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x55, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x56, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x57, 0x8e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x58, 0x09);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0x2e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x1a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5e, 0x69);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x59);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x60, 0x4e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0x4c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x63, 0x45);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x64, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x4a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x66, 0x49);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x67, 0x4a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x68, 0x68);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x69, 0x57);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6a, 0x5b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6b, 0x4e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6c, 0x49);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6d, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6e, 0x12);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x70, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x71, 0x69);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x72, 0x59);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x73, 0x4e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x74, 0x4c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x75, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x76, 0x45);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x77, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x78, 0x4a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x79, 0x49);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7a, 0x4a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7b, 0x68);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7c, 0x57);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7d, 0x5b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7e, 0x4e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7f, 0x49);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x81, 0x12);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x82, 0x02);
+
+ jd9365da_switch_page(&dsi_ctx, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x52);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0x50);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x77);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x57);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x06, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x4e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x08, 0x4c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0a, 0x4a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0b, 0x48);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0d, 0x46);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0e, 0x44);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0f, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x10, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x12, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x13, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x14, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x15, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0x53);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x51);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x77);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0x57);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1d, 0x4f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1e, 0x4d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x4b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x21, 0x49);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x23, 0x47);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x45);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x41);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x26, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x28, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x29, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2c, 0x13);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2d, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2e, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2f, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x30, 0x37);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x31, 0x17);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x32, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x35, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x36, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x37, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x38, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x39, 0x09);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3a, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3c, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3d, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3e, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3f, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x40, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x41, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x42, 0x12);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x43, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x44, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x45, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x46, 0x37);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x47, 0x17);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x48, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x49, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4a, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4b, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4c, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4d, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4e, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4f, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x50, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x51, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x52, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x53, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x54, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x55, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x56, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x57, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x58, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5e, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x60, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x63, 0x6c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x64, 0x6c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x66, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x67, 0xb4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x68, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x69, 0x6c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6a, 0x6c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6b, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6e, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x88);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x75, 0xbb);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x76, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x77, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x78, 0x2a);
+
+ jd9365da_switch_page(&dsi_ctx, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x23);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0e, 0x48);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x36, 0x49);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2e, 0x03);
+
+ jd9365da_switch_page(&dsi_ctx, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe6, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe7, 0x06);
+
+ return dsi_ctx.accum_err;
+};
+
+static const struct jadard_panel_desc melfas_lmfbx101117480_desc = {
+ .mode = {
+ .clock = (800 + 24 + 24 + 24) * (1280 + 30 + 4 + 8) * 60 / 1000,
+
+ .hdisplay = 800,
+ .hsync_start = 800 + 24,
+ .hsync_end = 800 + 24 + 24,
+ .htotal = 800 + 24 + 24 + 24,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 30,
+ .vsync_end = 1280 + 30 + 4,
+ .vtotal = 1280 + 30 + 4 + 8,
+
+ .width_mm = 135,
+ .height_mm = 216,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init = melfas_lmfbx101117480_init_cmds,
+ .lp11_before_reset = true,
+ .reset_before_power_off_vcioo = true,
+ .vcioo_to_lp11_delay_ms = 5,
+ .lp11_to_reset_delay_ms = 10,
+ .exit_sleep_to_display_on_delay_ms = 120,
+ .display_on_delay_ms = 20,
+ .backlight_off_to_display_off_delay_ms = 100,
+ .display_off_to_enter_sleep_delay_ms = 50,
+ .enter_sleep_to_reset_down_delay_ms = 100,
+};
+
static int jadard_dsi_probe(struct mipi_dsi_device *dsi)
{
struct device *dev = &dsi->dev;
@@ -927,6 +1186,10 @@ static const struct of_device_id jadard_of_match[] = {
.data = &kingdisplay_kd101ne3_40ti_desc
},
{
+ .compatible = "melfas,lmfbx101117480",
+ .data = &melfas_lmfbx101117480_desc
+ },
+ {
.compatible = "radxa,display-10hd-ad001",
.data = &cz101b4001_desc
},
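All of these init functions rely on the mipi_dsi_multi_context error accumulation: each *_multi() helper becomes a no-op once accum_err is set, so a long command sequence needs a single error check at the end instead of one per write. A minimal sketch of the pattern (the foo_ name is illustrative):

	static int foo_panel_init_cmds(struct jadard *jadard)
	{
		struct mipi_dsi_multi_context dsi_ctx = { .dsi = jadard->dsi };

		jd9365da_switch_page(&dsi_ctx, 0x00);
		jadard_enable_standard_cmds(&dsi_ctx);
		/* ... hundreds of mipi_dsi_dcs_write_seq_multi() calls ... */
		jd9365da_switch_page(&dsi_ctx, 0x00);

		/* first error (if any) encountered anywhere above */
		return dsi_ctx.accum_err;
	}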
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
index e81a70147259..8c9e04207ba9 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
@@ -44,6 +44,16 @@ struct nt36672e_panel {
const struct panel_desc *desc;
};
+#define NT36672E_DCS_SWITCH_PAGE 0xff
+
+#define nt36672e_switch_page(ctx, page) \
+ mipi_dsi_dcs_write_seq_multi(ctx, NT36672E_DCS_SWITCH_PAGE, (page))
+
+static void nt36672e_enable_reload_cmds(struct mipi_dsi_multi_context *ctx)
+{
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+}
+
static inline struct nt36672e_panel *to_nt36672e_panel(struct drm_panel *panel)
{
return container_of(panel, struct nt36672e_panel, panel);
@@ -51,16 +61,16 @@ static inline struct nt36672e_panel *to_nt36672e_panel(struct drm_panel *panel)
static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
{
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x10);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+ nt36672e_switch_page(ctx, 0x10);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0xb0, 0x00);
mipi_dsi_dcs_write_seq_multi(ctx, 0xc0, 0x00);
mipi_dsi_dcs_write_seq_multi(ctx, 0xc1, 0x89, 0x28, 0x00, 0x08, 0x00, 0xaa, 0x02,
0x0e, 0x00, 0x2b, 0x00, 0x07, 0x0d, 0xb7, 0x0c, 0xb7);
-
mipi_dsi_dcs_write_seq_multi(ctx, 0xc2, 0x1b, 0xa0);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x20);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x20);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x01, 0x66);
mipi_dsi_dcs_write_seq_multi(ctx, 0x06, 0x40);
mipi_dsi_dcs_write_seq_multi(ctx, 0x07, 0x38);
@@ -76,8 +86,9 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
mipi_dsi_dcs_write_seq_multi(ctx, 0xf7, 0x54);
mipi_dsi_dcs_write_seq_multi(ctx, 0xf8, 0x64);
mipi_dsi_dcs_write_seq_multi(ctx, 0xf9, 0x54);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x24);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x24);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x01, 0x0f);
mipi_dsi_dcs_write_seq_multi(ctx, 0x03, 0x0c);
mipi_dsi_dcs_write_seq_multi(ctx, 0x05, 0x1d);
@@ -139,8 +150,9 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
mipi_dsi_dcs_write_seq_multi(ctx, 0xc9, 0x00);
mipi_dsi_dcs_write_seq_multi(ctx, 0xd9, 0x80);
mipi_dsi_dcs_write_seq_multi(ctx, 0xe9, 0x02);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x25);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x25);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x18, 0x22);
mipi_dsi_dcs_write_seq_multi(ctx, 0x19, 0xe4);
mipi_dsi_dcs_write_seq_multi(ctx, 0x21, 0x40);
@@ -164,8 +176,9 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
mipi_dsi_dcs_write_seq_multi(ctx, 0xd7, 0x80);
mipi_dsi_dcs_write_seq_multi(ctx, 0xef, 0x20);
mipi_dsi_dcs_write_seq_multi(ctx, 0xf0, 0x84);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x26);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x26);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x81, 0x0f);
mipi_dsi_dcs_write_seq_multi(ctx, 0x83, 0x01);
mipi_dsi_dcs_write_seq_multi(ctx, 0x84, 0x03);
@@ -185,8 +198,9 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
mipi_dsi_dcs_write_seq_multi(ctx, 0x9c, 0x00);
mipi_dsi_dcs_write_seq_multi(ctx, 0x9d, 0x00);
mipi_dsi_dcs_write_seq_multi(ctx, 0x9e, 0x00);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x27);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x27);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x01, 0x68);
mipi_dsi_dcs_write_seq_multi(ctx, 0x20, 0x81);
mipi_dsi_dcs_write_seq_multi(ctx, 0x21, 0x6a);
@@ -215,8 +229,9 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
mipi_dsi_dcs_write_seq_multi(ctx, 0xe6, 0xd3);
mipi_dsi_dcs_write_seq_multi(ctx, 0xeb, 0x03);
mipi_dsi_dcs_write_seq_multi(ctx, 0xec, 0x28);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x2a);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x2a);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x00, 0x91);
mipi_dsi_dcs_write_seq_multi(ctx, 0x03, 0x20);
mipi_dsi_dcs_write_seq_multi(ctx, 0x07, 0x50);
@@ -260,8 +275,9 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
mipi_dsi_dcs_write_seq_multi(ctx, 0x8c, 0x7d);
mipi_dsi_dcs_write_seq_multi(ctx, 0x8d, 0x7d);
mipi_dsi_dcs_write_seq_multi(ctx, 0x8e, 0x7d);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x20);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x20);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0xb0, 0x00, 0x00, 0x00, 0x17, 0x00, 0x49, 0x00,
0x6a, 0x00, 0x89, 0x00, 0x9f, 0x00, 0xb6, 0x00, 0xc8);
mipi_dsi_dcs_write_seq_multi(ctx, 0xb1, 0x00, 0xd9, 0x01, 0x10, 0x01, 0x3a, 0x01,
@@ -286,8 +302,9 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
0x01, 0x03, 0x1f, 0x03, 0x4a, 0x03, 0x59, 0x03, 0x6a);
mipi_dsi_dcs_write_seq_multi(ctx, 0xbb, 0x03, 0x7d, 0x03, 0x93, 0x03, 0xab, 0x03,
0xc8, 0x03, 0xec, 0x03, 0xfe, 0x00, 0x00);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x21);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x21);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0xb0, 0x00, 0x00, 0x00, 0x17, 0x00, 0x49, 0x00,
0x6a, 0x00, 0x89, 0x00, 0x9f, 0x00, 0xb6, 0x00, 0xc8);
mipi_dsi_dcs_write_seq_multi(ctx, 0xb1, 0x00, 0xd9, 0x01, 0x10, 0x01, 0x3a, 0x01,
@@ -312,8 +329,9 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
0x01, 0x03, 0x1f, 0x03, 0x4a, 0x03, 0x59, 0x03, 0x6a);
mipi_dsi_dcs_write_seq_multi(ctx, 0xbb, 0x03, 0x7d, 0x03, 0x93, 0x03, 0xab, 0x03,
0xc8, 0x03, 0xec, 0x03, 0xfe, 0x00, 0x00);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x2c);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0x2c);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x61, 0x1f);
mipi_dsi_dcs_write_seq_multi(ctx, 0x62, 0x1f);
mipi_dsi_dcs_write_seq_multi(ctx, 0x7e, 0x03);
@@ -327,12 +345,13 @@ static void nt36672e_1080x2408_60hz_init(struct mipi_dsi_multi_context *ctx)
mipi_dsi_dcs_write_seq_multi(ctx, 0x56, 0x0f);
mipi_dsi_dcs_write_seq_multi(ctx, 0x58, 0x0f);
mipi_dsi_dcs_write_seq_multi(ctx, 0x59, 0x0f);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0xf0);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+
+ nt36672e_switch_page(ctx, 0xf0);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x5a, 0x00);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xff, 0x10);
- mipi_dsi_dcs_write_seq_multi(ctx, 0xfb, 0x01);
+ nt36672e_switch_page(ctx, 0x10);
+ nt36672e_enable_reload_cmds(ctx);
mipi_dsi_dcs_write_seq_multi(ctx, 0x51, 0xff);
mipi_dsi_dcs_write_seq_multi(ctx, 0x53, 0x24);
mipi_dsi_dcs_write_seq_multi(ctx, 0x55, 0x01);
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index bf40057c5cf3..1b78248cbe4f 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -726,16 +726,24 @@ static void panel_simple_shutdown(struct device *dev)
* drm_atomic_helper_shutdown() at shutdown time and that should
* cause the panel to be disabled / unprepared if needed. For now,
* however, we'll keep these calls due to the sheer number of
- * different DRM modeset drivers used with panel-simple. The fact that
- * we're calling these and _also_ the drm_atomic_helper_shutdown()
- * will try to disable/unprepare means that we can get a warning about
- * trying to disable/unprepare an already disabled/unprepared panel,
- * but that's something we'll have to live with until we've confirmed
- * that all DRM modeset drivers are properly calling
- * drm_atomic_helper_shutdown().
+ * different DRM modeset drivers used with panel-simple. Once we've
+ * confirmed that all DRM modeset drivers using this panel properly
+ * call drm_atomic_helper_shutdown() we can simply delete the two
+ * calls below.
+ *
+ * TO BE EXPLICIT: THE CALLS BELOW SHOULDN'T BE COPIED TO ANY NEW
+ * PANEL DRIVERS.
+ *
+	 * FIXME: If we still haven't figured out whether all DRM modeset
+ * drivers properly call drm_atomic_helper_shutdown() but we _have_
+ * managed to make sure that DRM modeset drivers get their shutdown()
+ * callback before the panel's shutdown() callback (perhaps using
+ * device link), we could add a WARN_ON here to help move forward.
*/
- drm_panel_disable(&panel->base);
- drm_panel_unprepare(&panel->base);
+ if (panel->base.enabled)
+ drm_panel_disable(&panel->base);
+ if (panel->base.prepared)
+ drm_panel_unprepare(&panel->base);
}
static void panel_simple_remove(struct device *dev)
diff --git a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
index f2198fa29735..104b2290560e 100644
--- a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
+++ b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
@@ -25,6 +25,12 @@ struct truly_nt35521 {
struct gpio_desc *blen_gpio;
};
+#define NT35521_DCS_SWITCH_PAGE 0xf0
+
+#define nt35521_switch_page(dsi_ctx, page) \
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, NT35521_DCS_SWITCH_PAGE, \
+ 0x55, 0xaa, 0x52, 0x08, (page))
+
static inline
struct truly_nt35521 *to_truly_nt35521(struct drm_panel *panel)
{
@@ -48,7 +54,7 @@ static int truly_nt35521_on(struct truly_nt35521 *ctx)
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
+ nt35521_switch_page(&dsi_ctx, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xff, 0xaa, 0x55, 0xa5, 0x80);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0x6f, 0x11, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf7, 0x20, 0x00);
@@ -59,7 +65,8 @@ static int truly_nt35521_on(struct truly_nt35521 *ctx)
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xbb, 0x11, 0x11);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xbc, 0x00, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb6, 0x02);
- mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x01);
+
+ nt35521_switch_page(&dsi_ctx, 0x01);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x09, 0x09);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb1, 0x09, 0x09);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xbc, 0x8c, 0x00);
@@ -71,7 +78,8 @@ static int truly_nt35521_on(struct truly_nt35521 *ctx)
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb4, 0x25, 0x25);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb9, 0x43, 0x43);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xba, 0x24, 0x24);
- mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x02);
+
+ nt35521_switch_page(&dsi_ctx, 0x02);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xee, 0x03);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0,
0x00, 0xb2, 0x00, 0xb3, 0x00, 0xb6, 0x00, 0xc3,
@@ -103,7 +111,8 @@ static int truly_nt35521_on(struct truly_nt35521 *ctx)
0x02, 0x93, 0x02, 0xcd, 0x02, 0xf6, 0x03, 0x31,
0x03, 0x6c, 0x03, 0xe9, 0x03, 0xef, 0x03, 0xf4);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xbb, 0x03, 0xf6, 0x03, 0xf7);
- mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x03);
+
+ nt35521_switch_page(&dsi_ctx, 0x03);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x22, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb1, 0x22, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb2, 0x05, 0x00, 0x60, 0x00, 0x00);
@@ -122,7 +131,8 @@ static int truly_nt35521_on(struct truly_nt35521 *ctx)
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xc5, 0xc0);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xc6, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xc7, 0x00);
- mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x05);
+
+ nt35521_switch_page(&dsi_ctx, 0x05);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x17, 0x06);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb1, 0x17, 0x06);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb2, 0x17, 0x06);
@@ -178,7 +188,8 @@ static int truly_nt35521_on(struct truly_nt35521 *ctx)
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xeb, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xec, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xed, 0x30);
- mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x06);
+
+ nt35521_switch_page(&dsi_ctx, 0x06);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb0, 0x31, 0x31);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb1, 0x31, 0x31);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb2, 0x2d, 0x2e);
@@ -235,10 +246,12 @@ static int truly_nt35521_on(struct truly_nt35521 *ctx)
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0x6f, 0x11);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf3, 0x01);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0x35, 0x00);
- mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
+
+ nt35521_switch_page(&dsi_ctx, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xd9, 0x02, 0x03, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x00, 0x00);
- mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
+
+ nt35521_switch_page(&dsi_ctx, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xb1, 0x6c, 0x21);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0xf0, 0x55, 0xaa, 0x52, 0x00, 0x00);
mipi_dsi_generic_write_seq_multi(&dsi_ctx, 0x35, 0x00);
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index a61ef0af9a4e..df49d37d0e7e 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -727,7 +727,7 @@ panfrost_reset(struct panfrost_device *pfdev,
/* Restart the schedulers */
for (i = 0; i < NUM_JOB_SLOTS; i++)
- drm_sched_start(&pfdev->js->queue[i].sched, true);
+ drm_sched_start(&pfdev->js->queue[i].sched);
/* Re-enable job interrupts now that everything has been restarted. */
job_write(pfdev, JOB_INT_MASK,
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index fa0a002b1016..d47972806d50 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -827,7 +827,7 @@ static void panthor_vm_stop(struct panthor_vm *vm)
static void panthor_vm_start(struct panthor_vm *vm)
{
- drm_sched_start(&vm->sched, true);
+ drm_sched_start(&vm->sched);
}
/**
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 463bcd3cf00f..c426a392b081 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -2538,7 +2538,7 @@ static void queue_start(struct panthor_queue *queue)
list_for_each_entry(job, &queue->scheduler.pending_list, base.list)
job->base.s_fence->parent = dma_fence_get(job->done_fence);
- drm_sched_start(&queue->scheduler, true);
+ drm_sched_start(&queue->scheduler);
}
static void panthor_group_stop(struct panthor_group *group)
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 1fe6e0d883c7..e5577d2a19ef 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -33,8 +33,10 @@
#include "evergreen_reg_safe.h"
#include "cayman_reg_safe.h"
+#ifndef MIN
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif
#define REG_SAFE_BM_SIZE ARRAY_SIZE(evergreen_reg_safe_bm)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 7e90c9f95611..ab53ab486fe6 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -674,13 +674,11 @@ EXPORT_SYMBOL(drm_sched_stop);
* drm_sched_start - recover jobs after a reset
*
* @sched: scheduler instance
- * @full_recovery: proceed with complete sched restart
*
*/
-void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
+void drm_sched_start(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *s_job, *tmp;
- int r;
/*
* Locking the list is not required here as the sched thread is parked
@@ -692,24 +690,17 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
atomic_add(s_job->credits, &sched->credit_count);
- if (!full_recovery)
+ if (!fence) {
+ drm_sched_job_done(s_job, -ECANCELED);
continue;
+ }
- if (fence) {
- r = dma_fence_add_callback(fence, &s_job->cb,
- drm_sched_job_done_cb);
- if (r == -ENOENT)
- drm_sched_job_done(s_job, fence->error);
- else if (r)
- DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
- r);
- } else
- drm_sched_job_done(s_job, -ECANCELED);
+ if (dma_fence_add_callback(fence, &s_job->cb,
+ drm_sched_job_done_cb))
+ drm_sched_job_done(s_job, fence->error);
}
- if (full_recovery)
- drm_sched_start_timeout_unlocked(sched);
-
+ drm_sched_start_timeout_unlocked(sched);
drm_sched_wqueue_start(sched);
}
EXPORT_SYMBOL(drm_sched_start);
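With the full_recovery parameter gone, callers that previously passed true simply drop the argument, and the usual "stop, reset, start" recovery shape stays the same; the panfrost and panthor hunks above show exactly that conversion. A sketch of a typical reset path under the new signature (foo_ names are illustrative, not from this patch):

	static void foo_gpu_recover(struct drm_gpu_scheduler *sched,
				    struct drm_sched_job *bad_job)
	{
		drm_sched_stop(sched, bad_job);
		/* ... reset the hardware and resubmit pending jobs ... */
		drm_sched_start(sched);		/* was drm_sched_start(sched, true) */
	}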
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 48a5d49fc131..68b8197b3dd1 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -582,7 +582,6 @@ MODULE_DEVICE_TABLE(of, dvo_of_match);
struct platform_driver sti_dvo_driver = {
.driver = {
.name = "sti-dvo",
- .owner = THIS_MODULE,
.of_match_table = dvo_of_match,
},
.probe = sti_dvo_probe,
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 6ee35612a14e..f18faad974aa 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -807,7 +807,6 @@ MODULE_DEVICE_TABLE(of, hda_of_match);
struct platform_driver sti_hda_driver = {
.driver = {
.name = "sti-hda",
- .owner = THIS_MODULE,
.of_match_table = hda_of_match,
},
.probe = sti_hda_probe,
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 500936d5743c..b0d84440a87b 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1485,7 +1485,6 @@ static void sti_hdmi_remove(struct platform_device *pdev)
struct platform_driver sti_hdmi_driver = {
.driver = {
.name = "sti-hdmi",
- .owner = THIS_MODULE,
.of_match_table = hdmi_of_match,
},
.probe = sti_hdmi_probe,
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 0fb48ac044d8..acbf70b95aeb 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -1414,7 +1414,6 @@ MODULE_DEVICE_TABLE(of, hqvdp_of_match);
struct platform_driver sti_hqvdp_driver = {
.driver = {
.name = "sti-hqvdp",
- .owner = THIS_MODULE,
.of_match_table = hqvdp_of_match,
},
.probe = sti_hqvdp_probe,
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index 64615638b79a..e714c232026c 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -886,7 +886,6 @@ MODULE_DEVICE_TABLE(of, tvout_of_match);
struct platform_driver sti_tvout_driver = {
.driver = {
.name = "sti-tvout",
- .owner = THIS_MODULE,
.of_match_table = tvout_of_match,
},
.probe = sti_tvout_probe,
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 5e5f82b6a5d9..5ba469b711b5 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -431,7 +431,6 @@ MODULE_DEVICE_TABLE(of, vtg_of_match);
struct platform_driver sti_vtg_driver = {
.driver = {
.name = "sti-vtg",
- .owner = THIS_MODULE,
.of_match_table = vtg_of_match,
},
.probe = vtg_probe,
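The .owner assignments dropped from these sti platform_driver definitions are redundant: the platform core fills the field in at registration time. Roughly, per include/linux/platform_device.h and drivers/base/platform.c (a simplified sketch of existing core code, shown only for context):

#define platform_driver_register(drv) \
	__platform_driver_register(drv, THIS_MODULE)

/* __platform_driver_register() then does, in essence: */
int __platform_driver_register(struct platform_driver *drv, struct module *owner)
{
	drv->driver.owner = owner;	/* owner set by the core */
	drv->driver.bus = &platform_bus_type;
	return driver_register(&drv->driver);
}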
diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig
index 1cc6b6cbdfa9..d7f41a87808e 100644
--- a/drivers/gpu/drm/stm/Kconfig
+++ b/drivers/gpu/drm/stm/Kconfig
@@ -2,6 +2,7 @@
config DRM_STM
tristate "DRM Support for STMicroelectronics SoC Series"
depends on DRM && (ARCH_STM32 || COMPILE_TEST)
+ depends on COMMON_CLK
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select DRM_PANEL_BRIDGE
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index e8523abef27a..e1232f74dfa5 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -25,6 +25,7 @@
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
+#include <drm/drm_managed.h>
#include "ltdc.h"
@@ -75,7 +76,7 @@ static int drv_load(struct drm_device *ddev)
DRM_DEBUG("%s\n", __func__);
- ldev = devm_kzalloc(ddev->dev, sizeof(*ldev), GFP_KERNEL);
+ ldev = drmm_kzalloc(ddev, sizeof(*ldev), GFP_KERNEL);
if (!ldev)
return -ENOMEM;
@@ -203,12 +204,14 @@ static int stm_drm_platform_probe(struct platform_device *pdev)
ret = drm_dev_register(ddev, 0);
if (ret)
- goto err_put;
+ goto err_unload;
drm_fbdev_dma_setup(ddev, 16);
return 0;
+err_unload:
+ drv_unload(ddev);
err_put:
drm_dev_put(ddev);
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 5576fdae4962..54a73753eff9 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -36,6 +36,7 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
+#include <drm/drm_managed.h>
#include <video/videomode.h>
@@ -169,6 +170,7 @@
#define IER_RRIE BIT(3) /* Register Reload Interrupt Enable */
#define IER_FUEIE BIT(6) /* Fifo Underrun Error Interrupt Enable */
#define IER_CRCIE BIT(7) /* CRC Error Interrupt Enable */
+#define IER_MASK (IER_LIE | IER_FUWIE | IER_TERRIE | IER_RRIE | IER_FUEIE | IER_CRCIE)
#define CPSR_CYPOS GENMASK(15, 0) /* Current Y position */
@@ -187,6 +189,7 @@
#define LXCR_COLKEN BIT(1) /* Color Keying Enable */
#define LXCR_CLUTEN BIT(4) /* Color Look-Up Table ENable */
#define LXCR_HMEN BIT(8) /* Horizontal Mirroring ENable */
+#define LXCR_MASK (LXCR_LEN | LXCR_COLKEN | LXCR_CLUTEN | LXCR_HMEN)
#define LXWHPCR_WHSTPOS GENMASK(11, 0) /* Window Horizontal StarT POSition */
#define LXWHPCR_WHSPPOS GENMASK(27, 16) /* Window Horizontal StoP POSition */
@@ -491,11 +494,6 @@ static inline struct ltdc_device *plane_to_ltdc(struct drm_plane *plane)
return (struct ltdc_device *)plane->dev->dev_private;
}
-static inline struct ltdc_device *encoder_to_ltdc(struct drm_encoder *enc)
-{
- return (struct ltdc_device *)enc->dev->dev_private;
-}
-
static inline enum ltdc_pix_fmt to_ltdc_pixelformat(u32 drm_fmt)
{
enum ltdc_pix_fmt pf;
@@ -784,7 +782,7 @@ static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc,
regmap_write(ldev->regmap, LTDC_BCCR, BCCR_BCBLACK);
/* Enable IRQ */
- regmap_set_bits(ldev->regmap, LTDC_IER, IER_FUWIE | IER_FUEIE | IER_RRIE | IER_TERRIE);
+ regmap_set_bits(ldev->regmap, LTDC_IER, IER_FUWIE | IER_FUEIE | IER_TERRIE);
/* Commit shadow registers = update planes at next vblank */
if (!ldev->caps.plane_reg_shadow)
@@ -806,11 +804,10 @@ static void ltdc_crtc_atomic_disable(struct drm_crtc *crtc,
/* Disable all layers */
for (layer_index = 0; layer_index < ldev->caps.nb_layers; layer_index++)
- regmap_write_bits(ldev->regmap, LTDC_L1CR + layer_index * LAY_OFS,
- LXCR_CLUTEN | LXCR_LEN, 0);
+ regmap_write_bits(ldev->regmap, LTDC_L1CR + layer_index * LAY_OFS, LXCR_MASK, 0);
- /* disable IRQ */
- regmap_clear_bits(ldev->regmap, LTDC_IER, IER_FUWIE | IER_FUEIE | IER_RRIE | IER_TERRIE);
+ /* Disable IRQ */
+ regmap_clear_bits(ldev->regmap, LTDC_IER, IER_FUWIE | IER_FUEIE | IER_TERRIE);
/* immediately commit disable of layers before switching off LTDC */
if (!ldev->caps.plane_reg_shadow)
@@ -1199,7 +1196,6 @@ static void ltdc_crtc_atomic_print_state(struct drm_printer *p,
}
static const struct drm_crtc_funcs ltdc_crtc_funcs = {
- .destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
@@ -1212,7 +1208,6 @@ static const struct drm_crtc_funcs ltdc_crtc_funcs = {
};
static const struct drm_crtc_funcs ltdc_crtc_with_crc_support_funcs = {
- .destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
@@ -1474,7 +1469,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
if (newstate->rotation & DRM_MODE_REFLECT_X)
val |= LXCR_HMEN;
- regmap_write_bits(ldev->regmap, LTDC_L1CR + lofs, LXCR_LEN | LXCR_CLUTEN | LXCR_HMEN, val);
+ regmap_write_bits(ldev->regmap, LTDC_L1CR + lofs, LXCR_MASK, val);
/* Commit shadow registers = update plane at next vblank */
if (ldev->caps.plane_reg_shadow)
@@ -1512,7 +1507,10 @@ static void ltdc_plane_atomic_disable(struct drm_plane *plane,
u32 lofs = plane->index * LAY_OFS;
/* Disable layer */
- regmap_write_bits(ldev->regmap, LTDC_L1CR + lofs, LXCR_LEN | LXCR_CLUTEN | LXCR_HMEN, 0);
+ regmap_write_bits(ldev->regmap, LTDC_L1CR + lofs, LXCR_MASK, 0);
+
+ /* Reset the layer transparency to hide any related background color */
+ regmap_write_bits(ldev->regmap, LTDC_L1CACR + lofs, LXCACR_CONSTA, 0x00);
/* Commit shadow registers = update plane at next vblank */
if (ldev->caps.plane_reg_shadow)
@@ -1545,7 +1543,6 @@ static void ltdc_plane_atomic_print_state(struct drm_printer *p,
static const struct drm_plane_funcs ltdc_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
@@ -1572,7 +1569,6 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
const u64 *modifiers = ltdc_format_modifiers;
u32 lofs = index * LAY_OFS;
u32 val;
- int ret;
/* Allocate the biggest size according to supported color formats */
formats = devm_kzalloc(dev, (ldev->caps.pix_fmt_nb +
@@ -1580,6 +1576,8 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
ARRAY_SIZE(ltdc_drm_fmt_ycbcr_sp) +
ARRAY_SIZE(ltdc_drm_fmt_ycbcr_fp)) *
sizeof(*formats), GFP_KERNEL);
+ if (!formats)
+ return NULL;
for (i = 0; i < ldev->caps.pix_fmt_nb; i++) {
drm_fmt = ldev->caps.pix_fmt_drm[i];
@@ -1613,14 +1611,10 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
}
}
- plane = devm_kzalloc(dev, sizeof(*plane), GFP_KERNEL);
- if (!plane)
- return NULL;
-
- ret = drm_universal_plane_init(ddev, plane, possible_crtcs,
- &ltdc_plane_funcs, formats, nb_fmt,
- modifiers, type, NULL);
- if (ret < 0)
+ plane = drmm_universal_plane_alloc(ddev, struct drm_plane, dev,
+ possible_crtcs, &ltdc_plane_funcs, formats,
+ nb_fmt, modifiers, type, NULL);
+ if (IS_ERR(plane))
return NULL;
if (ldev->caps.ycbcr_input) {
@@ -1643,15 +1637,6 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
return plane;
}
-static void ltdc_plane_destroy_all(struct drm_device *ddev)
-{
- struct drm_plane *plane, *plane_temp;
-
- list_for_each_entry_safe(plane, plane_temp,
- &ddev->mode_config.plane_list, head)
- drm_plane_cleanup(plane);
-}
-
static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
{
struct ltdc_device *ldev = ddev->dev_private;
@@ -1677,14 +1662,14 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
/* Init CRTC according to its hardware features */
if (ldev->caps.crc)
- ret = drm_crtc_init_with_planes(ddev, crtc, primary, NULL,
- &ltdc_crtc_with_crc_support_funcs, NULL);
+ ret = drmm_crtc_init_with_planes(ddev, crtc, primary, NULL,
+ &ltdc_crtc_with_crc_support_funcs, NULL);
else
- ret = drm_crtc_init_with_planes(ddev, crtc, primary, NULL,
- &ltdc_crtc_funcs, NULL);
+ ret = drmm_crtc_init_with_planes(ddev, crtc, primary, NULL,
+ &ltdc_crtc_funcs, NULL);
if (ret) {
DRM_ERROR("Can not initialize CRTC\n");
- goto cleanup;
+ return ret;
}
drm_crtc_helper_add(crtc, &ltdc_crtc_helper_funcs);
@@ -1698,9 +1683,8 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
for (i = 1; i < ldev->caps.nb_layers; i++) {
overlay = ltdc_plane_create(ddev, DRM_PLANE_TYPE_OVERLAY, i);
if (!overlay) {
- ret = -ENOMEM;
DRM_ERROR("Can not create overlay plane %d\n", i);
- goto cleanup;
+ return -ENOMEM;
}
if (ldev->caps.dynamic_zorder)
drm_plane_create_zpos_property(overlay, i, 0, ldev->caps.nb_layers - 1);
@@ -1713,10 +1697,6 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
}
return 0;
-
-cleanup:
- ltdc_plane_destroy_all(ddev);
- return ret;
}
static void ltdc_encoder_disable(struct drm_encoder *encoder)
@@ -1776,23 +1756,19 @@ static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
struct drm_encoder *encoder;
int ret;
- encoder = devm_kzalloc(ddev->dev, sizeof(*encoder), GFP_KERNEL);
- if (!encoder)
- return -ENOMEM;
+ encoder = drmm_simple_encoder_alloc(ddev, struct drm_encoder, dev,
+ DRM_MODE_ENCODER_DPI);
+ if (IS_ERR(encoder))
+ return PTR_ERR(encoder);
encoder->possible_crtcs = CRTC_MASK;
encoder->possible_clones = 0; /* No cloning support */
- drm_simple_encoder_init(ddev, encoder, DRM_MODE_ENCODER_DPI);
-
drm_encoder_helper_add(encoder, &ltdc_encoder_helper_funcs);
ret = drm_bridge_attach(encoder, bridge, NULL, 0);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- drm_encoder_cleanup(encoder);
+ if (ret)
return ret;
- }
DRM_DEBUG_DRIVER("Bridge encoder:%d created\n", encoder->base.id);
@@ -1962,8 +1938,7 @@ int ltdc_load(struct drm_device *ddev)
goto err;
if (panel) {
- bridge = drm_panel_bridge_add_typed(panel,
- DRM_MODE_CONNECTOR_DPI);
+ bridge = drmm_panel_bridge_add(ddev, panel);
if (IS_ERR(bridge)) {
DRM_ERROR("panel-bridge endpoint %d\n", i);
ret = PTR_ERR(bridge);
@@ -2013,13 +1988,8 @@ int ltdc_load(struct drm_device *ddev)
goto err;
}
- /* Disable interrupts */
- if (ldev->caps.fifo_threshold)
- regmap_clear_bits(ldev->regmap, LTDC_IER, IER_LIE | IER_RRIE | IER_FUWIE |
- IER_TERRIE);
- else
- regmap_clear_bits(ldev->regmap, LTDC_IER, IER_LIE | IER_RRIE | IER_FUWIE |
- IER_TERRIE | IER_FUEIE);
+ /* Disable all interrupts */
+ regmap_clear_bits(ldev->regmap, LTDC_IER, IER_MASK);
DRM_DEBUG_DRIVER("ltdc hw version 0x%08x\n", ldev->caps.hw_version);
@@ -2045,7 +2015,7 @@ int ltdc_load(struct drm_device *ddev)
}
}
- crtc = devm_kzalloc(dev, sizeof(*crtc), GFP_KERNEL);
+ crtc = drmm_kzalloc(ddev, sizeof(*crtc), GFP_KERNEL);
if (!crtc) {
DRM_ERROR("Failed to allocate crtc\n");
ret = -ENOMEM;
@@ -2072,9 +2042,6 @@ int ltdc_load(struct drm_device *ddev)
return 0;
err:
- for (i = 0; i < nb_endpoints; i++)
- drm_of_panel_bridge_remove(ddev->dev->of_node, 0, i);
-
clk_disable_unprepare(ldev->pixel_clk);
return ret;
@@ -2082,16 +2049,8 @@ err:
void ltdc_unload(struct drm_device *ddev)
{
- struct device *dev = ddev->dev;
- int nb_endpoints, i;
-
DRM_DEBUG_DRIVER("\n");
- nb_endpoints = of_graph_get_endpoint_count(dev->of_node);
-
- for (i = 0; i < nb_endpoints; i++)
- drm_of_panel_bridge_remove(ddev->dev->of_node, 0, i);
-
pm_runtime_disable(ddev->dev);
}
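The ltdc conversion above swaps devm_* allocations and explicit drm_*_cleanup() calls for DRM-managed (drmm_*) helpers, so plane, CRTC and encoder lifetimes follow the drm_device and the .destroy hooks and error-path teardown become unnecessary. A minimal sketch of the pattern, assuming a hypothetical wrapper struct and hypothetical my_plane_funcs/my_formats tables:

struct my_plane {
	struct drm_plane base;
	/* hypothetical per-plane driver state would live here */
};

static struct my_plane *my_plane_create(struct drm_device *ddev, u32 possible_crtcs)
{
	struct my_plane *p;

	/* Allocation and drm_plane init/cleanup are tied to ddev's lifetime. */
	p = drmm_universal_plane_alloc(ddev, struct my_plane, base,
				       possible_crtcs, &my_plane_funcs,
				       my_formats, ARRAY_SIZE(my_formats),
				       NULL, DRM_PLANE_TYPE_OVERLAY, NULL);
	if (IS_ERR(p))
		return NULL;

	/* No .destroy callback and no manual cleanup on later errors. */
	return p;
}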
diff --git a/drivers/gpu/drm/stm/lvds.c b/drivers/gpu/drm/stm/lvds.c
index 2fa2c81784e9..06f2d7a56cc9 100644
--- a/drivers/gpu/drm/stm/lvds.c
+++ b/drivers/gpu/drm/stm/lvds.c
@@ -1210,7 +1210,6 @@ static struct platform_driver lvds_platform_driver = {
.remove = lvds_remove,
.driver = {
.name = "stm32-display-lvds",
- .owner = THIS_MODULE,
.of_match_table = lvds_dt_ids,
},
};
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 03d1c76aec2d..d9f0728c3afd 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1330,6 +1330,11 @@ static int host1x_drm_remove(struct host1x_device *dev)
return 0;
}
+static void host1x_drm_shutdown(struct host1x_device *dev)
+{
+ drm_atomic_helper_shutdown(dev_get_drvdata(&dev->dev));
+}
+
#ifdef CONFIG_PM_SLEEP
static int host1x_drm_suspend(struct device *dev)
{
@@ -1398,6 +1403,7 @@ static struct host1x_driver host1x_drm_driver = {
},
.probe = host1x_drm_probe,
.remove = host1x_drm_remove,
+ .shutdown = host1x_drm_shutdown,
.subdevs = host1x_drm_subdevs,
};
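Wiring drm_atomic_helper_shutdown() into the .shutdown callback ensures displays are disabled cleanly on reboot or kexec, mirroring what .remove already does. For an ordinary platform DRM driver the same pattern is a one-liner (hypothetical my_ driver, sketched for illustration):

static void my_drm_shutdown(struct platform_device *pdev)
{
	struct drm_device *drm = platform_get_drvdata(pdev);

	/* Quiesce all CRTCs and encoders before the system goes down. */
	drm_atomic_helper_shutdown(drm);
}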
diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
index c3758faa1b83..f7f7d8b0f61f 100644
--- a/drivers/gpu/drm/tests/drm_gem_shmem_test.c
+++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
@@ -23,29 +23,16 @@
#define TEST_BYTE 0xae
/*
- * Wrappers to avoid an explicit type casting when passing action
- * functions to kunit_add_action().
+ * Wrappers to avoid cast warnings when passing action functions
+ * directly to kunit_add_action().
*/
-static void kfree_wrapper(void *ptr)
-{
- const void *obj = ptr;
-
- kfree(obj);
-}
-
-static void sg_free_table_wrapper(void *ptr)
-{
- struct sg_table *sgt = ptr;
+KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *);
- sg_free_table(sgt);
-}
-
-static void drm_gem_shmem_free_wrapper(void *ptr)
-{
- struct drm_gem_shmem_object *shmem = ptr;
+KUNIT_DEFINE_ACTION_WRAPPER(sg_free_table_wrapper, sg_free_table,
+ struct sg_table *);
- drm_gem_shmem_free(shmem);
-}
+KUNIT_DEFINE_ACTION_WRAPPER(drm_gem_shmem_free_wrapper, drm_gem_shmem_free,
+ struct drm_gem_shmem_object *);
/*
* Test creating a shmem GEM object backed by shmem buffer. The test
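KUNIT_DEFINE_ACTION_WRAPPER() expands to a static void (*)(void *) thunk that performs the cast internally, so the result can be handed straight to the kunit action helpers without cast warnings. A short usage sketch (the test body and buffer are hypothetical):

/* Expands to: static void kfree_wrapper(void *in) { kfree((const void *)in); } */
KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *);

static void example_test(struct kunit *test)
{
	void *buf = kmalloc(16, GFP_KERNEL);

	KUNIT_ASSERT_NOT_NULL(test, buf);
	/* buf is freed automatically when the test exits, pass or fail. */
	KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, kfree_wrapper, buf), 0);
}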
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
index d1b32303d051..f0a7eb62116c 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
@@ -271,7 +271,7 @@ static void ttm_bo_unreserve_basic(struct kunit *test)
man = ttm_manager_type(priv->ttm_dev, mem_type);
KUNIT_ASSERT_EQ(test,
- list_is_last(&res1->lru, &man->lru[bo->priority]), 1);
+ list_is_last(&res1->lru.link, &man->lru[bo->priority]), 1);
ttm_resource_free(bo, &res2);
ttm_resource_free(bo, &res1);
@@ -308,11 +308,11 @@ static void ttm_bo_unreserve_pinned(struct kunit *test)
err = ttm_resource_alloc(bo, place, &res2);
KUNIT_ASSERT_EQ(test, err, 0);
KUNIT_ASSERT_EQ(test,
- list_is_last(&res2->lru, &priv->ttm_dev->pinned), 1);
+ list_is_last(&res2->lru.link, &priv->ttm_dev->pinned), 1);
ttm_bo_unreserve(bo);
KUNIT_ASSERT_EQ(test,
- list_is_last(&res1->lru, &priv->ttm_dev->pinned), 1);
+ list_is_last(&res1->lru.link, &priv->ttm_dev->pinned), 1);
ttm_resource_free(bo, &res1);
ttm_resource_free(bo, &res2);
diff --git a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
index 9c2f13e53162..22260e7aea58 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
@@ -198,7 +198,7 @@ static void ttm_resource_fini_basic(struct kunit *test)
ttm_resource_init(bo, place, res);
ttm_resource_fini(man, res);
- KUNIT_ASSERT_TRUE(test, list_empty(&res->lru));
+ KUNIT_ASSERT_TRUE(test, list_empty(&res->lru.link));
KUNIT_ASSERT_EQ(test, man->usage, 0);
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 2427be8bc97f..320592435252 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -224,80 +224,6 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
dma_resv_iter_end(&cursor);
}
-/**
- * ttm_bo_cleanup_refs
- * If bo idle, remove from lru lists, and unref.
- * If not idle, block if possible.
- *
- * Must be called with lru_lock and reservation held, this function
- * will drop the lru lock and optionally the reservation lock before returning.
- *
- * @bo: The buffer object to clean-up
- * @interruptible: Any sleeps should occur interruptibly.
- * @no_wait_gpu: Never wait for gpu. Return -EBUSY instead.
- * @unlock_resv: Unlock the reservation lock as well.
- */
-
-static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait_gpu,
- bool unlock_resv)
-{
- struct dma_resv *resv = &bo->base._resv;
- int ret;
-
- if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
- ret = 0;
- else
- ret = -EBUSY;
-
- if (ret && !no_wait_gpu) {
- long lret;
-
- if (unlock_resv)
- dma_resv_unlock(bo->base.resv);
- spin_unlock(&bo->bdev->lru_lock);
-
- lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
- interruptible,
- 30 * HZ);
-
- if (lret < 0)
- return lret;
- else if (lret == 0)
- return -EBUSY;
-
- spin_lock(&bo->bdev->lru_lock);
- if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
- /*
- * We raced, and lost, someone else holds the reservation now,
- * and is probably busy in ttm_bo_cleanup_memtype_use.
- *
- * Even if it's not the case, because we finished waiting any
- * delayed destruction would succeed, so just return success
- * here.
- */
- spin_unlock(&bo->bdev->lru_lock);
- return 0;
- }
- ret = 0;
- }
-
- if (ret) {
- if (unlock_resv)
- dma_resv_unlock(bo->base.resv);
- spin_unlock(&bo->bdev->lru_lock);
- return ret;
- }
-
- spin_unlock(&bo->bdev->lru_lock);
- ttm_bo_cleanup_memtype_use(bo);
-
- if (unlock_resv)
- dma_resv_unlock(bo->base.resv);
-
- return 0;
-}
-
/*
* Block for the dma_resv object to become idle, lock the buffer and clean up
* the resource and tt object.
@@ -506,150 +432,152 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
-/*
- * Check the target bo is allowable to be evicted or swapout, including cases:
- *
- * a. if share same reservation object with ctx->resv, have assumption
- * reservation objects should already be locked, so not lock again and
- * return true directly when either the opreation allow_reserved_eviction
- * or the target bo already is in delayed free list;
+/**
+ * ttm_bo_evict_first() - Evict the first bo on the manager's LRU list.
+ * @bdev: The ttm device.
+ * @man: The manager whose bo to evict.
+ * @ctx: The TTM operation ctx governing the eviction.
*
- * b. Otherwise, trylock it.
+ * Return: 0 if successful or the resource disappeared. Negative error code on error.
*/
-static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx,
- const struct ttm_place *place,
- bool *locked, bool *busy)
+int ttm_bo_evict_first(struct ttm_device *bdev, struct ttm_resource_manager *man,
+ struct ttm_operation_ctx *ctx)
{
- bool ret = false;
+ struct ttm_resource_cursor cursor;
+ struct ttm_buffer_object *bo;
+ struct ttm_resource *res;
+ unsigned int mem_type;
+ int ret = 0;
- if (bo->pin_count) {
- *locked = false;
- if (busy)
- *busy = false;
- return false;
+ spin_lock(&bdev->lru_lock);
+ res = ttm_resource_manager_first(man, &cursor);
+ ttm_resource_cursor_fini(&cursor);
+ if (!res) {
+ ret = -ENOENT;
+ goto out_no_ref;
}
+ bo = res->bo;
+ if (!ttm_bo_get_unless_zero(bo))
+ goto out_no_ref;
+ mem_type = res->mem_type;
+ spin_unlock(&bdev->lru_lock);
+ ret = ttm_bo_reserve(bo, ctx->interruptible, ctx->no_wait_gpu, NULL);
+ if (ret)
+ goto out_no_lock;
+ if (!bo->resource || bo->resource->mem_type != mem_type)
+ goto out_bo_moved;
- if (bo->base.resv == ctx->resv) {
- dma_resv_assert_held(bo->base.resv);
- if (ctx->allow_res_evict)
- ret = true;
- *locked = false;
- if (busy)
- *busy = false;
+ if (bo->deleted) {
+ ret = ttm_bo_wait_ctx(bo, ctx);
+ if (!ret)
+ ttm_bo_cleanup_memtype_use(bo);
} else {
- ret = dma_resv_trylock(bo->base.resv);
- *locked = ret;
- if (busy)
- *busy = !ret;
- }
-
- if (ret && place && (bo->resource->mem_type != place->mem_type ||
- !bo->bdev->funcs->eviction_valuable(bo, place))) {
- ret = false;
- if (*locked) {
- dma_resv_unlock(bo->base.resv);
- *locked = false;
- }
+ ret = ttm_bo_evict(bo, ctx);
}
+out_bo_moved:
+ dma_resv_unlock(bo->base.resv);
+out_no_lock:
+ ttm_bo_put(bo);
+ return ret;
+out_no_ref:
+ spin_unlock(&bdev->lru_lock);
return ret;
}
/**
- * ttm_mem_evict_wait_busy - wait for a busy BO to become available
- *
- * @busy_bo: BO which couldn't be locked with trylock
- * @ctx: operation context
- * @ticket: acquire ticket
- *
- * Try to lock a busy buffer object to avoid failing eviction.
+ * struct ttm_bo_evict_walk - Parameters for the evict walk.
*/
-static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
- struct ttm_operation_ctx *ctx,
- struct ww_acquire_ctx *ticket)
+struct ttm_bo_evict_walk {
+ /** @walk: The walk base parameters. */
+ struct ttm_lru_walk walk;
+ /** @place: The place passed to the resource allocation. */
+ const struct ttm_place *place;
+ /** @evictor: The buffer object we're trying to make room for. */
+ struct ttm_buffer_object *evictor;
+ /** @res: The allocated resource if any. */
+ struct ttm_resource **res;
+ /** @evicted: Number of successful evictions. */
+ unsigned long evicted;
+};
+
+static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
- int r;
-
- if (!busy_bo || !ticket)
- return -EBUSY;
-
- if (ctx->interruptible)
- r = dma_resv_lock_interruptible(busy_bo->base.resv,
- ticket);
- else
- r = dma_resv_lock(busy_bo->base.resv, ticket);
-
- /*
- * TODO: It would be better to keep the BO locked until allocation is at
- * least tried one more time, but that would mean a much larger rework
- * of TTM.
- */
- if (!r)
- dma_resv_unlock(busy_bo->base.resv);
-
- return r == -EDEADLK ? -EBUSY : r;
-}
-
-int ttm_mem_evict_first(struct ttm_device *bdev,
- struct ttm_resource_manager *man,
- const struct ttm_place *place,
- struct ttm_operation_ctx *ctx,
- struct ww_acquire_ctx *ticket)
-{
- struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
- struct ttm_resource_cursor cursor;
- struct ttm_resource *res;
- bool locked = false;
- int ret;
+ struct ttm_bo_evict_walk *evict_walk =
+ container_of(walk, typeof(*evict_walk), walk);
+ s64 lret;
- spin_lock(&bdev->lru_lock);
- ttm_resource_manager_for_each_res(man, &cursor, res) {
- bool busy;
-
- if (!ttm_bo_evict_swapout_allowable(res->bo, ctx, place,
- &locked, &busy)) {
- if (busy && !busy_bo && ticket !=
- dma_resv_locking_ctx(res->bo->base.resv))
- busy_bo = res->bo;
- continue;
- }
+ if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, evict_walk->place))
+ return 0;
- if (ttm_bo_get_unless_zero(res->bo)) {
- bo = res->bo;
- break;
- }
- if (locked)
- dma_resv_unlock(res->bo->base.resv);
+ if (bo->deleted) {
+ lret = ttm_bo_wait_ctx(bo, walk->ctx);
+ if (!lret)
+ ttm_bo_cleanup_memtype_use(bo);
+ } else {
+ lret = ttm_bo_evict(bo, walk->ctx);
}
- if (!bo) {
- if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
- busy_bo = NULL;
- spin_unlock(&bdev->lru_lock);
- ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
- if (busy_bo)
- ttm_bo_put(busy_bo);
- return ret;
- }
+ if (lret)
+ goto out;
- if (bo->deleted) {
- ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
- ctx->no_wait_gpu, locked);
- ttm_bo_put(bo);
- return ret;
- }
+ evict_walk->evicted++;
+ if (evict_walk->res)
+ lret = ttm_resource_alloc(evict_walk->evictor, evict_walk->place,
+ evict_walk->res);
+ if (lret == 0)
+ return 1;
+out:
+ /* Errors that should terminate the walk. */
+ if (lret == -ENOSPC)
+ return -EBUSY;
- spin_unlock(&bdev->lru_lock);
+ return lret;
+}
- ret = ttm_bo_evict(bo, ctx);
- if (locked)
- ttm_bo_unreserve(bo);
- else
- ttm_bo_move_to_lru_tail_unlocked(bo);
+static const struct ttm_lru_walk_ops ttm_evict_walk_ops = {
+ .process_bo = ttm_bo_evict_cb,
+};
+
+static int ttm_bo_evict_alloc(struct ttm_device *bdev,
+ struct ttm_resource_manager *man,
+ const struct ttm_place *place,
+ struct ttm_buffer_object *evictor,
+ struct ttm_operation_ctx *ctx,
+ struct ww_acquire_ctx *ticket,
+ struct ttm_resource **res)
+{
+ struct ttm_bo_evict_walk evict_walk = {
+ .walk = {
+ .ops = &ttm_evict_walk_ops,
+ .ctx = ctx,
+ .ticket = ticket,
+ },
+ .place = place,
+ .evictor = evictor,
+ .res = res,
+ };
+ s64 lret;
+
+ evict_walk.walk.trylock_only = true;
+ lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
+ if (lret || !ticket)
+ goto out;
- ttm_bo_put(bo);
- return ret;
+ /* If ticket-locking, repeat while making progress. */
+ evict_walk.walk.trylock_only = false;
+ do {
+ /* The walk may clear the evict_walk.walk.ticket field */
+ evict_walk.walk.ticket = ticket;
+ evict_walk.evicted = 0;
+ lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
+ } while (!lret && evict_walk.evicted);
+out:
+ if (lret < 0)
+ return lret;
+ if (lret == 0)
+ return -EBUSY;
+ return 0;
}
/**
@@ -760,6 +688,7 @@ static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
for (i = 0; i < placement->num_placement; ++i) {
const struct ttm_place *place = &placement->placement[i];
struct ttm_resource_manager *man;
+ bool may_evict;
man = ttm_manager_type(bdev, place->mem_type);
if (!man || !ttm_resource_manager_used(man))
@@ -769,22 +698,21 @@ static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
TTM_PL_FLAG_FALLBACK))
continue;
- do {
- ret = ttm_resource_alloc(bo, place, res);
- if (unlikely(ret && ret != -ENOSPC))
+ may_evict = (force_space && place->mem_type != TTM_PL_SYSTEM);
+ ret = ttm_resource_alloc(bo, place, res);
+ if (ret) {
+ if (ret != -ENOSPC)
return ret;
- if (likely(!ret) || !force_space)
- break;
-
- ret = ttm_mem_evict_first(bdev, man, place, ctx,
- ticket);
- if (unlikely(ret == -EBUSY))
- break;
- if (unlikely(ret))
+ if (!may_evict)
+ continue;
+
+ ret = ttm_bo_evict_alloc(bdev, man, place, bo, ctx,
+ ticket, res);
+ if (ret == -EBUSY)
+ continue;
+ if (ret)
return ret;
- } while (1);
- if (ret)
- continue;
+ }
ret = ttm_bo_add_move_fence(bo, man, ctx->no_wait_gpu);
if (unlikely(ret)) {
@@ -1118,12 +1046,24 @@ int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
}
EXPORT_SYMBOL(ttm_bo_wait_ctx);
-int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
- gfp_t gfp_flags)
+/**
+ * struct ttm_bo_swapout_walk - Parameters for the swapout walk
+ */
+struct ttm_bo_swapout_walk {
+ /** @walk: The walk base parameters. */
+ struct ttm_lru_walk walk;
+ /** @gfp_flags: The gfp flags to use for ttm_tt_swapout() */
+ gfp_t gfp_flags;
+};
+
+static s64
+ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
- struct ttm_place place;
- bool locked;
- long ret;
+ struct ttm_place place = {.mem_type = bo->resource->mem_type};
+ struct ttm_bo_swapout_walk *swapout_walk =
+ container_of(walk, typeof(*swapout_walk), walk);
+ struct ttm_operation_ctx *ctx = walk->ctx;
+ s64 ret;
/*
* While the bo may already reside in SYSTEM placement, set
@@ -1131,28 +1071,29 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
* The driver may use the fact that we're moving from SYSTEM
* as an indication that we're about to swap out.
*/
- memset(&place, 0, sizeof(place));
- place.mem_type = bo->resource->mem_type;
- if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
- return -EBUSY;
+ if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, &place)) {
+ ret = -EBUSY;
+ goto out;
+ }
if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
- bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED ||
- !ttm_bo_get_unless_zero(bo)) {
- if (locked)
- dma_resv_unlock(bo->base.resv);
- return -EBUSY;
+ bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED) {
+ ret = -EBUSY;
+ goto out;
}
if (bo->deleted) {
- ret = ttm_bo_cleanup_refs(bo, false, false, locked);
- ttm_bo_put(bo);
- return ret == -EBUSY ? -ENOSPC : ret;
- }
+ pgoff_t num_pages = bo->ttm->num_pages;
- /* TODO: Cleanup the locking */
- spin_unlock(&bo->bdev->lru_lock);
+ ret = ttm_bo_wait_ctx(bo, ctx);
+ if (ret)
+ goto out;
+
+ ttm_bo_cleanup_memtype_use(bo);
+ ret = num_pages;
+ goto out;
+ }
/*
* Move to system cached
@@ -1164,12 +1105,13 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
memset(&hop, 0, sizeof(hop));
place.mem_type = TTM_PL_SYSTEM;
ret = ttm_resource_alloc(bo, &place, &evict_mem);
- if (unlikely(ret))
+ if (ret)
goto out;
ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
- if (unlikely(ret != 0)) {
- WARN(ret == -EMULTIHOP, "Unexpected multihop in swaput - likely driver bug.\n");
+ if (ret) {
+ WARN(ret == -EMULTIHOP,
+ "Unexpected multihop in swapout - likely driver bug.\n");
ttm_resource_free(bo, &evict_mem);
goto out;
}
@@ -1179,30 +1121,54 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
* Make sure BO is idle.
*/
ret = ttm_bo_wait_ctx(bo, ctx);
- if (unlikely(ret != 0))
+ if (ret)
goto out;
ttm_bo_unmap_virtual(bo);
-
- /*
- * Swap out. Buffer will be swapped in again as soon as
- * anyone tries to access a ttm page.
- */
if (bo->bdev->funcs->swap_notify)
bo->bdev->funcs->swap_notify(bo);
if (ttm_tt_is_populated(bo->ttm))
- ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
+ ret = ttm_tt_swapout(bo->bdev, bo->ttm, swapout_walk->gfp_flags);
+
out:
+ /* Consider -ENOMEM and -ENOSPC non-fatal. */
+ if (ret == -ENOMEM || ret == -ENOSPC)
+ ret = -EBUSY;
- /*
- * Unreserve without putting on LRU to avoid swapping out an
- * already swapped buffer.
- */
- if (locked)
- dma_resv_unlock(bo->base.resv);
- ttm_bo_put(bo);
- return ret == -EBUSY ? -ENOSPC : ret;
+ return ret;
+}
+
+const struct ttm_lru_walk_ops ttm_swap_ops = {
+ .process_bo = ttm_bo_swapout_cb,
+};
+
+/**
+ * ttm_bo_swapout() - Swap out buffer objects on the LRU list to shmem.
+ * @bdev: The ttm device.
+ * @ctx: The ttm_operation_ctx governing the swapout operation.
+ * @man: The resource manager whose resources / buffer objects are
+ * going to be swapped out.
+ * @gfp_flags: The gfp flags used for shmem page allocations.
+ * @target: The desired number of bytes to swap out.
+ *
+ * Return: The number of bytes actually swapped out, or negative error code
+ * on error.
+ */
+s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
+ struct ttm_resource_manager *man, gfp_t gfp_flags,
+ s64 target)
+{
+ struct ttm_bo_swapout_walk swapout_walk = {
+ .walk = {
+ .ops = &ttm_swap_ops,
+ .ctx = ctx,
+ .trylock_only = true,
+ },
+ .gfp_flags = gfp_flags,
+ };
+
+ return ttm_lru_walk_for_evict(&swapout_walk.walk, bdev, man, target);
}
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
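ttm_bo_swapout() now operates on a whole resource manager and returns the progress made toward @target, instead of acting on a single, pre-locked BO. A sketch of a caller that sweeps all TT-backed managers toward a target, loosely modelled on ttm_device_swapout() below (my_shrink_tt() and its policy are hypothetical):

static s64 my_shrink_tt(struct ttm_device *bdev, gfp_t gfp_flags, s64 target)
{
	struct ttm_operation_ctx ctx = { .no_wait_gpu = true };
	struct ttm_resource_manager *man;
	s64 progress = 0, lret;
	unsigned int i;

	for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
		man = ttm_manager_type(bdev, i);
		if (!man || !man->use_tt)
			continue;

		lret = ttm_bo_swapout(bdev, &ctx, man, gfp_flags, target - progress);
		if (lret < 0)
			return lret;

		progress += lret;
		if (progress >= target)
			break;
	}

	return progress;
}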
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 0b3f4267130c..3c07f4712d5c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -768,3 +768,154 @@ error_destroy_tt:
ttm_tt_destroy(bo->bdev, ttm);
return ret;
}
+
+static bool ttm_lru_walk_trylock(struct ttm_lru_walk *walk,
+ struct ttm_buffer_object *bo,
+ bool *needs_unlock)
+{
+ struct ttm_operation_ctx *ctx = walk->ctx;
+
+ *needs_unlock = false;
+
+ if (dma_resv_trylock(bo->base.resv)) {
+ *needs_unlock = true;
+ return true;
+ }
+
+ if (bo->base.resv == ctx->resv && ctx->allow_res_evict) {
+ dma_resv_assert_held(bo->base.resv);
+ return true;
+ }
+
+ return false;
+}
+
+static int ttm_lru_walk_ticketlock(struct ttm_lru_walk *walk,
+ struct ttm_buffer_object *bo,
+ bool *needs_unlock)
+{
+ struct dma_resv *resv = bo->base.resv;
+ int ret;
+
+ if (walk->ctx->interruptible)
+ ret = dma_resv_lock_interruptible(resv, walk->ticket);
+ else
+ ret = dma_resv_lock(resv, walk->ticket);
+
+ if (!ret) {
+ *needs_unlock = true;
+ /*
+ * Only a single ticketlock per loop. Ticketlocks are prone
+ * to return -EDEADLK causing the eviction to fail, so
+ * after waiting for the ticketlock, revert back to
+ * trylocking for this walk.
+ */
+ walk->ticket = NULL;
+ } else if (ret == -EDEADLK) {
+ /* Caller needs to exit the ww transaction. */
+ ret = -ENOSPC;
+ }
+
+ return ret;
+}
+
+static void ttm_lru_walk_unlock(struct ttm_buffer_object *bo, bool locked)
+{
+ if (locked)
+ dma_resv_unlock(bo->base.resv);
+}
+
+/**
+ * ttm_lru_walk_for_evict() - Perform a LRU list walk, with actions taken on
+ * valid items.
+ * @walk: describe the walks and actions taken
+ * @bdev: The TTM device.
+ * @man: The struct ttm_resource manager whose LRU lists we're walking.
+ * @target: The end condition for the walk.
+ *
+ * The LRU lists of @man are walked, and for each struct ttm_resource encountered,
+ * the corresponding ttm_buffer_object is locked and a reference is taken on it,
+ * and the LRU lock is dropped. The LRU lock may be dropped before locking and, in
+ * that case, it's verified that the item actually remains on the LRU list after
+ * the lock, and that the buffer object didn't switch resource in between.
+ *
+ * With a locked object, the actions indicated by @walk->process_bo are
+ * performed, and after that, the bo is unlocked, the refcount dropped and the
+ * next struct ttm_resource is processed. Here, the walker relies on
+ * TTM's restartable LRU list implementation.
+ *
+ * Typically @walk->process_bo() would return the number of pages evicted,
+ * swapped or shrunken, so that when the total exceeds @target, or when the
+ * LRU list has been walked in full, iteration is terminated. It's also terminated
+ * on error. Note that the definition of @target is done by the caller; it
+ * could have a different meaning than the number of pages.
+ *
+ * Note that the way dma_resv individualization is done, locking needs to be done
+ * either with the LRU lock held (trylocking only) or with a reference on the
+ * object.
+ *
+ * Return: The progress made towards target or negative error code on error.
+ */
+s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
+ struct ttm_resource_manager *man, s64 target)
+{
+ struct ttm_resource_cursor cursor;
+ struct ttm_resource *res;
+ s64 progress = 0;
+ s64 lret;
+
+ spin_lock(&bdev->lru_lock);
+ ttm_resource_manager_for_each_res(man, &cursor, res) {
+ struct ttm_buffer_object *bo = res->bo;
+ bool bo_needs_unlock = false;
+ bool bo_locked = false;
+ int mem_type;
+
+ /*
+ * Attempt a trylock before taking a reference on the bo,
+ * since if we do it the other way around, and the trylock fails,
+ * we need to drop the lru lock to put the bo.
+ */
+ if (ttm_lru_walk_trylock(walk, bo, &bo_needs_unlock))
+ bo_locked = true;
+ else if (!walk->ticket || walk->ctx->no_wait_gpu ||
+ walk->trylock_only)
+ continue;
+
+ if (!ttm_bo_get_unless_zero(bo)) {
+ ttm_lru_walk_unlock(bo, bo_needs_unlock);
+ continue;
+ }
+
+ mem_type = res->mem_type;
+ spin_unlock(&bdev->lru_lock);
+
+ lret = 0;
+ if (!bo_locked)
+ lret = ttm_lru_walk_ticketlock(walk, bo, &bo_needs_unlock);
+
+ /*
+ * Note that in between the release of the lru lock and the
+ * ticketlock, the bo may have switched resource,
+ * and also memory type, since the resource may have been
+ * freed and allocated again with a different memory type.
+ * In that case, just skip it.
+ */
+ if (!lret && bo->resource && bo->resource->mem_type == mem_type)
+ lret = walk->ops->process_bo(walk, bo);
+
+ ttm_lru_walk_unlock(bo, bo_needs_unlock);
+ ttm_bo_put(bo);
+ if (lret == -EBUSY || lret == -EALREADY)
+ lret = 0;
+ progress = (lret < 0) ? lret : progress + lret;
+
+ spin_lock(&bdev->lru_lock);
+ if (progress < 0 || progress >= target)
+ break;
+ }
+ ttm_resource_cursor_fini(&cursor);
+ spin_unlock(&bdev->lru_lock);
+
+ return progress;
+}
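ttm_lru_walk_for_evict() is driven by a struct ttm_lru_walk embedded in a caller-defined context plus a process_bo callback, exactly as the eviction and swapout paths above do. A minimal sketch of a custom walk, assuming a purely hypothetical callback that only counts resident pages and never evicts:

struct my_count_walk {
	struct ttm_lru_walk walk;
	unsigned long pages;	/* hypothetical accumulator */
};

static s64 my_count_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
	struct my_count_walk *count = container_of(walk, typeof(*count), walk);

	count->pages += PFN_UP(bo->base.size);
	return 0;	/* report no progress so the whole list is visited */
}

static const struct ttm_lru_walk_ops my_count_ops = {
	.process_bo = my_count_cb,
};

static s64 my_count_pages(struct ttm_device *bdev, struct ttm_resource_manager *man,
			  struct ttm_operation_ctx *ctx)
{
	struct my_count_walk count = {
		.walk = { .ops = &my_count_ops, .ctx = ctx, .trylock_only = true },
	};
	s64 lret;

	lret = ttm_lru_walk_for_evict(&count.walk, bdev, man, S64_MAX);
	return lret < 0 ? lret : count.pages;
}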
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 434cf0258000..e7cc4954c1bc 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -148,35 +148,20 @@ int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags)
{
- struct ttm_resource_cursor cursor;
struct ttm_resource_manager *man;
- struct ttm_resource *res;
unsigned i;
- int ret;
+ s64 lret;
- spin_lock(&bdev->lru_lock);
for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
man = ttm_manager_type(bdev, i);
if (!man || !man->use_tt)
continue;
- ttm_resource_manager_for_each_res(man, &cursor, res) {
- struct ttm_buffer_object *bo = res->bo;
- uint32_t num_pages;
-
- if (!bo || bo->resource != res)
- continue;
-
- num_pages = PFN_UP(bo->base.size);
- ret = ttm_bo_swapout(bo, ctx, gfp_flags);
- /* ttm_bo_swapout has dropped the lru_lock */
- if (!ret)
- return num_pages;
- if (ret != -EBUSY)
- return ret;
- }
+ lret = ttm_bo_swapout(bdev, ctx, man, gfp_flags, 1);
+ /* Can be both positive (num_pages) and negative (error) */
+ if (lret)
+ return lret;
}
- spin_unlock(&bdev->lru_lock);
return 0;
}
EXPORT_SYMBOL(ttm_device_swapout);
@@ -274,14 +259,14 @@ static void ttm_device_clear_lru_dma_mappings(struct ttm_device *bdev,
struct ttm_resource *res;
spin_lock(&bdev->lru_lock);
- while ((res = list_first_entry_or_null(list, typeof(*res), lru))) {
+ while ((res = ttm_lru_first_res_or_null(list))) {
struct ttm_buffer_object *bo = res->bo;
/* Take ref against racing releases once lru_lock is unlocked */
if (!ttm_bo_get_unless_zero(bo))
continue;
- list_del_init(&res->lru);
+ list_del_init(&bo->resource->lru.link);
spin_unlock(&bdev->lru_lock);
if (bo->ttm)
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 6e1fd6985ffc..8504dbe19c1a 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -91,7 +91,7 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
*/
if (order)
gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
- __GFP_KSWAPD_RECLAIM;
+ __GFP_THISNODE;
if (!pool->use_dma_alloc) {
p = alloc_pages_node(pool->nid, gfp_flags, order);
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 4a66b851b67d..6d764ba88aab 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -33,6 +33,68 @@
#include <drm/drm_util.h>
+/* Detach the cursor from the bulk move list */
+static void
+ttm_resource_cursor_clear_bulk(struct ttm_resource_cursor *cursor)
+{
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+
+ cursor->bulk = NULL;
+ list_del_init(&cursor->bulk_link);
+}
+
+/* Move the cursor to the end of the bulk move list it's in */
+static void ttm_resource_cursor_move_bulk_tail(struct ttm_lru_bulk_move *bulk,
+ struct ttm_resource_cursor *cursor)
+{
+ struct ttm_lru_bulk_move_pos *pos;
+
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+
+ if (WARN_ON_ONCE(bulk != cursor->bulk)) {
+ list_del_init(&cursor->bulk_link);
+ return;
+ }
+
+ pos = &bulk->pos[cursor->mem_type][cursor->priority];
+ if (pos->last)
+ list_move(&cursor->hitch.link, &pos->last->lru.link);
+ ttm_resource_cursor_clear_bulk(cursor);
+}
+
+/* Move all cursors attached to a bulk move to its end */
+static void ttm_bulk_move_adjust_cursors(struct ttm_lru_bulk_move *bulk)
+{
+ struct ttm_resource_cursor *cursor, *next;
+
+ list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
+ ttm_resource_cursor_move_bulk_tail(bulk, cursor);
+}
+
+/* Remove a cursor from an empty bulk move list */
+static void ttm_bulk_move_drop_cursors(struct ttm_lru_bulk_move *bulk)
+{
+ struct ttm_resource_cursor *cursor, *next;
+
+ list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
+ ttm_resource_cursor_clear_bulk(cursor);
+}
+
+/**
+ * ttm_resource_cursor_fini() - Finalize the LRU list cursor usage
+ * @cursor: The struct ttm_resource_cursor to finalize.
+ *
+ * The function pulls the LRU list cursor off any lists it was previously
+ * attached to. Needs to be called with the LRU lock held. The function
+ * can be called multiple times after each other.
+ */
+void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor)
+{
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+ list_del_init(&cursor->hitch.link);
+ ttm_resource_cursor_clear_bulk(cursor);
+}
+
/**
* ttm_lru_bulk_move_init - initialize a bulk move structure
* @bulk: the structure to init
@@ -42,10 +104,28 @@
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
{
memset(bulk, 0, sizeof(*bulk));
+ INIT_LIST_HEAD(&bulk->cursor_list);
}
EXPORT_SYMBOL(ttm_lru_bulk_move_init);
/**
+ * ttm_lru_bulk_move_fini - finalize a bulk move structure
+ * @bdev: The struct ttm_device
+ * @bulk: the structure to finalize
+ *
+ * Sanity checks that bulk moves don't have any
+ * resources left and hence no cursors attached.
+ */
+void ttm_lru_bulk_move_fini(struct ttm_device *bdev,
+ struct ttm_lru_bulk_move *bulk)
+{
+ spin_lock(&bdev->lru_lock);
+ ttm_bulk_move_drop_cursors(bulk);
+ spin_unlock(&bdev->lru_lock);
+}
+EXPORT_SYMBOL(ttm_lru_bulk_move_fini);
+
+/**
* ttm_lru_bulk_move_tail - bulk move range of resources to the LRU tail.
*
* @bulk: bulk move structure
@@ -57,6 +137,7 @@ void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
{
unsigned i, j;
+ ttm_bulk_move_adjust_cursors(bulk);
for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
@@ -70,8 +151,8 @@ void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
dma_resv_assert_held(pos->last->bo->base.resv);
man = ttm_manager_type(pos->first->bo->bdev, i);
- list_bulk_move_tail(&man->lru[j], &pos->first->lru,
- &pos->last->lru);
+ list_bulk_move_tail(&man->lru[j], &pos->first->lru.link,
+ &pos->last->lru.link);
}
}
}
@@ -84,14 +165,38 @@ ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res)
return &bulk->pos[res->mem_type][res->bo->priority];
}
+/* Return the previous resource on the list (skip over non-resource list items) */
+static struct ttm_resource *ttm_lru_prev_res(struct ttm_resource *cur)
+{
+ struct ttm_lru_item *lru = &cur->lru;
+
+ do {
+ lru = list_prev_entry(lru, link);
+ } while (!ttm_lru_item_is_res(lru));
+
+ return ttm_lru_item_to_res(lru);
+}
+
+/* Return the next resource on the list (skip over non-resource list items) */
+static struct ttm_resource *ttm_lru_next_res(struct ttm_resource *cur)
+{
+ struct ttm_lru_item *lru = &cur->lru;
+
+ do {
+ lru = list_next_entry(lru, link);
+ } while (!ttm_lru_item_is_res(lru));
+
+ return ttm_lru_item_to_res(lru);
+}
+
/* Move the resource to the tail of the bulk move range */
static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
struct ttm_resource *res)
{
if (pos->last != res) {
if (pos->first == res)
- pos->first = list_next_entry(res, lru);
- list_move(&res->lru, &pos->last->lru);
+ pos->first = ttm_lru_next_res(res);
+ list_move(&res->lru.link, &pos->last->lru.link);
pos->last = res;
}
}
@@ -122,11 +227,11 @@ static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
pos->first = NULL;
pos->last = NULL;
} else if (pos->first == res) {
- pos->first = list_next_entry(res, lru);
+ pos->first = ttm_lru_next_res(res);
} else if (pos->last == res) {
- pos->last = list_prev_entry(res, lru);
+ pos->last = ttm_lru_prev_res(res);
} else {
- list_move(&res->lru, &pos->last->lru);
+ list_move(&res->lru.link, &pos->last->lru.link);
}
}
@@ -155,7 +260,7 @@ void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
lockdep_assert_held(&bo->bdev->lru_lock);
if (bo->pin_count) {
- list_move_tail(&res->lru, &bdev->pinned);
+ list_move_tail(&res->lru.link, &bdev->pinned);
} else if (bo->bulk_move) {
struct ttm_lru_bulk_move_pos *pos =
@@ -166,7 +271,7 @@ void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
struct ttm_resource_manager *man;
man = ttm_manager_type(bdev, res->mem_type);
- list_move_tail(&res->lru, &man->lru[bo->priority]);
+ list_move_tail(&res->lru.link, &man->lru[bo->priority]);
}
}
@@ -197,9 +302,9 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
man = ttm_manager_type(bo->bdev, place->mem_type);
spin_lock(&bo->bdev->lru_lock);
if (bo->pin_count)
- list_add_tail(&res->lru, &bo->bdev->pinned);
+ list_add_tail(&res->lru.link, &bo->bdev->pinned);
else
- list_add_tail(&res->lru, &man->lru[bo->priority]);
+ list_add_tail(&res->lru.link, &man->lru[bo->priority]);
man->usage += res->size;
spin_unlock(&bo->bdev->lru_lock);
}
@@ -221,7 +326,7 @@ void ttm_resource_fini(struct ttm_resource_manager *man,
struct ttm_device *bdev = man->bdev;
spin_lock(&bdev->lru_lock);
- list_del_init(&res->lru);
+ list_del_init(&res->lru.link);
man->usage -= res->size;
spin_unlock(&bdev->lru_lock);
}
@@ -390,24 +495,11 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
};
struct dma_fence *fence;
int ret;
- unsigned i;
- /*
- * Can't use standard list traversal since we're unlocking.
- */
-
- spin_lock(&bdev->lru_lock);
- for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
- while (!list_empty(&man->lru[i])) {
- spin_unlock(&bdev->lru_lock);
- ret = ttm_mem_evict_first(bdev, man, NULL, &ctx,
- NULL);
- if (ret)
- return ret;
- spin_lock(&bdev->lru_lock);
- }
- }
- spin_unlock(&bdev->lru_lock);
+ do {
+ ret = ttm_bo_evict_first(bdev, man, &ctx);
+ cond_resched();
+ } while (!ret);
spin_lock(&man->move_lock);
fence = dma_fence_get(man->move);
@@ -460,53 +552,106 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man,
}
EXPORT_SYMBOL(ttm_resource_manager_debug);
+static void
+ttm_resource_cursor_check_bulk(struct ttm_resource_cursor *cursor,
+ struct ttm_lru_item *next_lru)
+{
+ struct ttm_resource *next = ttm_lru_item_to_res(next_lru);
+ struct ttm_lru_bulk_move *bulk = NULL;
+ struct ttm_buffer_object *bo = next->bo;
+
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+ bulk = bo->bulk_move;
+
+ if (cursor->bulk != bulk) {
+ if (bulk) {
+ list_move_tail(&cursor->bulk_link, &bulk->cursor_list);
+ cursor->mem_type = next->mem_type;
+ } else {
+ list_del_init(&cursor->bulk_link);
+ }
+ cursor->bulk = bulk;
+ }
+}
+
/**
- * ttm_resource_manager_first
- *
+ * ttm_resource_manager_first() - Start iterating over the resources
+ * of a resource manager
* @man: resource manager to iterate over
* @cursor: cursor to record the position
*
- * Returns the first resource from the resource manager.
+ * Initializes the cursor and starts iterating. When done iterating,
+ * the caller must explicitly call ttm_resource_cursor_fini().
+ *
+ * Return: The first resource from the resource manager.
*/
struct ttm_resource *
ttm_resource_manager_first(struct ttm_resource_manager *man,
struct ttm_resource_cursor *cursor)
{
- struct ttm_resource *res;
-
lockdep_assert_held(&man->bdev->lru_lock);
- for (cursor->priority = 0; cursor->priority < TTM_MAX_BO_PRIORITY;
- ++cursor->priority)
- list_for_each_entry(res, &man->lru[cursor->priority], lru)
- return res;
+ cursor->priority = 0;
+ cursor->man = man;
+ ttm_lru_item_init(&cursor->hitch, TTM_LRU_HITCH);
+ INIT_LIST_HEAD(&cursor->bulk_link);
+ list_add(&cursor->hitch.link, &man->lru[cursor->priority]);
- return NULL;
+ return ttm_resource_manager_next(cursor);
}
/**
- * ttm_resource_manager_next
- *
- * @man: resource manager to iterate over
+ * ttm_resource_manager_next() - Continue iterating over the resource manager
+ * resources
* @cursor: cursor to record the position
- * @res: the current resource pointer
*
- * Returns the next resource from the resource manager.
+ * Return: the next resource from the resource manager.
*/
struct ttm_resource *
-ttm_resource_manager_next(struct ttm_resource_manager *man,
- struct ttm_resource_cursor *cursor,
- struct ttm_resource *res)
+ttm_resource_manager_next(struct ttm_resource_cursor *cursor)
{
+ struct ttm_resource_manager *man = cursor->man;
+ struct ttm_lru_item *lru;
+
lockdep_assert_held(&man->bdev->lru_lock);
- list_for_each_entry_continue(res, &man->lru[cursor->priority], lru)
- return res;
+ for (;;) {
+ lru = &cursor->hitch;
+ list_for_each_entry_continue(lru, &man->lru[cursor->priority], link) {
+ if (ttm_lru_item_is_res(lru)) {
+ ttm_resource_cursor_check_bulk(cursor, lru);
+ list_move(&cursor->hitch.link, &lru->link);
+ return ttm_lru_item_to_res(lru);
+ }
+ }
+
+ if (++cursor->priority >= TTM_MAX_BO_PRIORITY)
+ break;
+
+ list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
+ ttm_resource_cursor_clear_bulk(cursor);
+ }
+
+ ttm_resource_cursor_fini(cursor);
- for (++cursor->priority; cursor->priority < TTM_MAX_BO_PRIORITY;
- ++cursor->priority)
- list_for_each_entry(res, &man->lru[cursor->priority], lru)
- return res;
+ return NULL;
+}
+
+/**
+ * ttm_lru_first_res_or_null() - Return the first resource on an lru list
+ * @head: The list head of the lru list.
+ *
+ * Return: Pointer to the first resource on the lru list or NULL if
+ * there is none.
+ */
+struct ttm_resource *ttm_lru_first_res_or_null(struct list_head *head)
+{
+ struct ttm_lru_item *lru;
+
+ list_for_each_entry(lru, head, link) {
+ if (ttm_lru_item_is_res(lru))
+ return ttm_lru_item_to_res(lru);
+ }
return NULL;
}
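With the hitch-based cursor, ttm_resource_manager_next() takes only the cursor, and callers must end the walk with ttm_resource_cursor_fini() while still holding the LRU lock. A sketch of the updated iteration pattern (bdev and man assumed to be in scope):

struct ttm_resource_cursor cursor;
struct ttm_resource *res;

spin_lock(&bdev->lru_lock);
for (res = ttm_resource_manager_first(man, &cursor); res;
     res = ttm_resource_manager_next(&cursor)) {
	/* Inspect res; the hitch keeps the walk restartable if the
	 * LRU lock is dropped and retaken while processing it.
	 */
}
ttm_resource_cursor_fini(&cursor);
spin_unlock(&bdev->lru_lock);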
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index a165cbcdd27b..ecb80fd75b1a 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -26,6 +26,17 @@
#include "v3d_drv.h"
#include "uapi/drm/v3d_drm.h"
+static enum drm_gem_object_status v3d_gem_status(struct drm_gem_object *obj)
+{
+ struct v3d_bo *bo = to_v3d_bo(obj);
+ enum drm_gem_object_status res = 0;
+
+ if (bo->base.pages)
+ res |= DRM_GEM_OBJECT_RESIDENT;
+
+ return res;
+}
+
/* Called DRM core on the last userspace/kernel unreference of the
* BO.
*/
@@ -63,6 +74,7 @@ static const struct drm_gem_object_funcs v3d_gem_funcs = {
.vmap = drm_gem_shmem_object_vmap,
.vunmap = drm_gem_shmem_object_vunmap,
.mmap = drm_gem_shmem_object_mmap,
+ .status = v3d_gem_status,
.vm_ops = &drm_gem_shmem_vm_ops,
};
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 5982941d933b..d7ff1f5fa481 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -95,7 +95,7 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
args->value = 1;
return 0;
case DRM_V3D_PARAM_MAX_PERF_COUNTERS:
- args->value = v3d->max_counters;
+ args->value = v3d->perfmon_info.max_counters;
return 0;
default:
DRM_DEBUG("Unknown parameter %d\n", args->param);
@@ -184,6 +184,8 @@ static void v3d_show_fdinfo(struct drm_printer *p, struct drm_file *file)
drm_printf(p, "v3d-jobs-%s: \t%llu jobs\n",
v3d_queue_to_string(queue), jobs_completed);
}
+
+ drm_show_memory_stats(p, file);
}
static const struct file_operations v3d_drm_fops = {
@@ -301,12 +303,7 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
ident3 = V3D_READ(V3D_HUB_IDENT3);
v3d->rev = V3D_GET_FIELD(ident3, V3D_HUB_IDENT3_IPREV);
- if (v3d->ver >= 71)
- v3d->max_counters = V3D_V71_NUM_PERFCOUNTERS;
- else if (v3d->ver >= 42)
- v3d->max_counters = V3D_V42_NUM_PERFCOUNTERS;
- else
- v3d->max_counters = 0;
+ v3d_perfmon_init(v3d);
v3d->reset = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(v3d->reset)) {
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 49089eefb7c7..cf4b23369dc4 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -106,10 +106,7 @@ struct v3d_dev {
bool single_irq_line;
- /* Different revisions of V3D have different total number of performance
- * counters
- */
- unsigned int max_counters;
+ struct v3d_perfmon_info perfmon_info;
void __iomem *hub_regs;
void __iomem *core_regs[3];
@@ -353,13 +350,9 @@ struct v3d_timestamp_query {
struct drm_syncobj *syncobj;
};
-/* Number of perfmons required to handle all supported performance counters */
-#define V3D_MAX_PERFMONS DIV_ROUND_UP(V3D_MAX_COUNTERS, \
- DRM_V3D_MAX_PERF_COUNTERS)
-
struct v3d_performance_query {
/* Performance monitor IDs for this query */
- u32 kperfmon_ids[V3D_MAX_PERFMONS];
+ u32 *kperfmon_ids;
/* Syncobj that indicates the query availability */
struct drm_syncobj *syncobj;
@@ -565,11 +558,16 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo);
void v3d_mmu_remove_ptes(struct v3d_bo *bo);
/* v3d_sched.c */
+void v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info,
+ unsigned int count);
+void v3d_performance_query_info_free(struct v3d_performance_query_info *query_info,
+ unsigned int count);
void v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue);
int v3d_sched_init(struct v3d_dev *v3d);
void v3d_sched_fini(struct v3d_dev *v3d);
/* v3d_perfmon.c */
+void v3d_perfmon_init(struct v3d_dev *v3d);
void v3d_perfmon_get(struct v3d_perfmon *perfmon);
void v3d_perfmon_put(struct v3d_perfmon *perfmon);
void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon);
diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c
index b7d0b02e1a95..cd7f1eedf17f 100644
--- a/drivers/gpu/drm/v3d/v3d_perfmon.c
+++ b/drivers/gpu/drm/v3d/v3d_perfmon.c
@@ -195,6 +195,23 @@ static const struct v3d_perf_counter_desc v3d_v71_performance_counters[] = {
{"QPU", "QPU-stalls-other", "[QPU] Stalled qcycles waiting for any other reason (vary/W/Z)"},
};
+void v3d_perfmon_init(struct v3d_dev *v3d)
+{
+ const struct v3d_perf_counter_desc *counters = NULL;
+ unsigned int max = 0;
+
+ if (v3d->ver >= 71) {
+ counters = v3d_v71_performance_counters;
+ max = ARRAY_SIZE(v3d_v71_performance_counters);
+ } else if (v3d->ver >= 42) {
+ counters = v3d_v42_performance_counters;
+ max = ARRAY_SIZE(v3d_v42_performance_counters);
+ }
+
+ v3d->perfmon_info.max_counters = max;
+ v3d->perfmon_info.counters = counters;
+}
+
void v3d_perfmon_get(struct v3d_perfmon *perfmon)
{
if (perfmon)
@@ -321,7 +338,7 @@ int v3d_perfmon_create_ioctl(struct drm_device *dev, void *data,
/* Make sure all counters are valid. */
for (i = 0; i < req->ncounters; i++) {
- if (req->counters[i] >= v3d->max_counters)
+ if (req->counters[i] >= v3d->perfmon_info.max_counters)
return -EINVAL;
}
@@ -416,25 +433,14 @@ int v3d_perfmon_get_counter_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if (!v3d->perfmon_info.max_counters)
+ return -EOPNOTSUPP;
+
/* Make sure that the counter ID is valid */
- if (req->counter >= v3d->max_counters)
+ if (req->counter >= v3d->perfmon_info.max_counters)
return -EINVAL;
- BUILD_BUG_ON(ARRAY_SIZE(v3d_v42_performance_counters) !=
- V3D_V42_NUM_PERFCOUNTERS);
- BUILD_BUG_ON(ARRAY_SIZE(v3d_v71_performance_counters) !=
- V3D_V71_NUM_PERFCOUNTERS);
- BUILD_BUG_ON(V3D_MAX_COUNTERS < V3D_V42_NUM_PERFCOUNTERS);
- BUILD_BUG_ON(V3D_MAX_COUNTERS < V3D_V71_NUM_PERFCOUNTERS);
- BUILD_BUG_ON((V3D_MAX_COUNTERS != V3D_V42_NUM_PERFCOUNTERS) &&
- (V3D_MAX_COUNTERS != V3D_V71_NUM_PERFCOUNTERS));
-
- if (v3d->ver >= 71)
- counter = &v3d_v71_performance_counters[req->counter];
- else if (v3d->ver >= 42)
- counter = &v3d_v42_performance_counters[req->counter];
- else
- return -EOPNOTSUPP;
+ counter = &v3d->perfmon_info.counters[req->counter];
strscpy(req->name, counter->name, sizeof(req->name));
strscpy(req->category, counter->category, sizeof(req->category));
diff --git a/drivers/gpu/drm/v3d/v3d_performance_counters.h b/drivers/gpu/drm/v3d/v3d_performance_counters.h
index 131b2909522a..d919a2fc9449 100644
--- a/drivers/gpu/drm/v3d/v3d_performance_counters.h
+++ b/drivers/gpu/drm/v3d/v3d_performance_counters.h
@@ -19,11 +19,17 @@ struct v3d_perf_counter_desc {
char description[256];
};
+struct v3d_perfmon_info {
+ /*
+ * Different revisions of V3D have different total number of
+ * performance counters.
+ */
+ unsigned int max_counters;
-#define V3D_V42_NUM_PERFCOUNTERS (87)
-#define V3D_V71_NUM_PERFCOUNTERS (93)
-
-/* Maximum number of performance counters supported by any version of V3D */
-#define V3D_MAX_COUNTERS (93)
+ /*
+ * Array of counters valid for the platform.
+ */
+ const struct v3d_perf_counter_desc *counters;
+};
#endif
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 271a6d0f5aca..42d4f4a2dba2 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -73,24 +73,46 @@ v3d_sched_job_free(struct drm_sched_job *sched_job)
v3d_job_cleanup(job);
}
+void
+v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info,
+ unsigned int count)
+{
+ if (query_info->queries) {
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ drm_syncobj_put(query_info->queries[i].syncobj);
+
+ kvfree(query_info->queries);
+ }
+}
+
+void
+v3d_performance_query_info_free(struct v3d_performance_query_info *query_info,
+ unsigned int count)
+{
+ if (query_info->queries) {
+ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ drm_syncobj_put(query_info->queries[i].syncobj);
+ kvfree(query_info->queries[i].kperfmon_ids);
+ }
+
+ kvfree(query_info->queries);
+ }
+}
+
static void
v3d_cpu_job_free(struct drm_sched_job *sched_job)
{
struct v3d_cpu_job *job = to_cpu_job(sched_job);
- struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
- struct v3d_performance_query_info *performance_query = &job->performance_query;
- if (timestamp_query->queries) {
- for (int i = 0; i < timestamp_query->count; i++)
- drm_syncobj_put(timestamp_query->queries[i].syncobj);
- kvfree(timestamp_query->queries);
- }
+ v3d_timestamp_query_info_free(&job->timestamp_query,
+ job->timestamp_query.count);
- if (performance_query->queries) {
- for (int i = 0; i < performance_query->count; i++)
- drm_syncobj_put(performance_query->queries[i].syncobj);
- kvfree(performance_query->queries);
- }
+ v3d_performance_query_info_free(&job->performance_query,
+ job->performance_query.count);
v3d_job_cleanup(&job->base);
}
@@ -331,8 +353,7 @@ v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
struct v3d_bo *indirect = to_v3d_bo(indirect_csd->indirect);
struct drm_v3d_submit_csd *args = &indirect_csd->job->args;
- struct v3d_dev *v3d = job->base.v3d;
- u32 num_batches, *wg_counts;
+ u32 *wg_counts;
v3d_get_bo_vaddr(bo);
v3d_get_bo_vaddr(indirect);
@@ -345,17 +366,8 @@ v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
args->cfg[0] = wg_counts[0] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
args->cfg[1] = wg_counts[1] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
args->cfg[2] = wg_counts[2] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
-
- num_batches = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
- (wg_counts[0] * wg_counts[1] * wg_counts[2]);
-
- /* V3D 7.1.6 and later don't subtract 1 from the number of batches */
- if (v3d->ver < 71 || (v3d->ver == 71 && v3d->rev < 6))
- args->cfg[4] = num_batches - 1;
- else
- args->cfg[4] = num_batches;
-
- WARN_ON(args->cfg[4] == ~0);
+ args->cfg[4] = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
+ (wg_counts[0] * wg_counts[1] * wg_counts[2]) - 1;
for (int i = 0; i < 3; i++) {
/* 0xffffffff indicates that the uniform rewrite is not needed */
@@ -409,18 +421,23 @@ v3d_reset_timestamp_queries(struct v3d_cpu_job *job)
v3d_put_bo_vaddr(bo);
}
-static void
-write_to_buffer(void *dst, u32 idx, bool do_64bit, u64 value)
+static void write_to_buffer_32(u32 *dst, unsigned int idx, u32 value)
{
- if (do_64bit) {
- u64 *dst64 = (u64 *)dst;
+ dst[idx] = value;
+}
- dst64[idx] = value;
- } else {
- u32 *dst32 = (u32 *)dst;
+static void write_to_buffer_64(u64 *dst, unsigned int idx, u64 value)
+{
+ dst[idx] = value;
+}
- dst32[idx] = (u32)value;
- }
+static void
+write_to_buffer(void *dst, unsigned int idx, bool do_64bit, u64 value)
+{
+ if (do_64bit)
+ write_to_buffer_64(dst, idx, value);
+ else
+ write_to_buffer_32(dst, idx, value);
}
static void
@@ -493,18 +510,24 @@ v3d_reset_performance_queries(struct v3d_cpu_job *job)
}
static void
-v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data, u32 query)
+v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data,
+ unsigned int query)
{
- struct v3d_performance_query_info *performance_query = &job->performance_query;
- struct v3d_copy_query_results_info *copy = &job->copy;
+ struct v3d_performance_query_info *performance_query =
+ &job->performance_query;
struct v3d_file_priv *v3d_priv = job->base.file->driver_priv;
+ struct v3d_performance_query *perf_query =
+ &performance_query->queries[query];
struct v3d_dev *v3d = job->base.v3d;
- struct v3d_perfmon *perfmon;
- u64 counter_values[V3D_MAX_COUNTERS];
+ unsigned int i, j, offset;
+
+ for (i = 0, offset = 0;
+ i < performance_query->nperfmons;
+ i++, offset += DRM_V3D_MAX_PERF_COUNTERS) {
+ struct v3d_perfmon *perfmon;
- for (int i = 0; i < performance_query->nperfmons; i++) {
perfmon = v3d_perfmon_find(v3d_priv,
- performance_query->queries[query].kperfmon_ids[i]);
+ perf_query->kperfmon_ids[i]);
if (!perfmon) {
DRM_DEBUG("Failed to find perfmon.");
continue;
@@ -512,14 +535,18 @@ v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data, u32 quer
v3d_perfmon_stop(v3d, perfmon, true);
- memcpy(&counter_values[i * DRM_V3D_MAX_PERF_COUNTERS], perfmon->values,
- perfmon->ncounters * sizeof(u64));
+ if (job->copy.do_64bit) {
+ for (j = 0; j < perfmon->ncounters; j++)
+ write_to_buffer_64(data, offset + j,
+ perfmon->values[j]);
+ } else {
+ for (j = 0; j < perfmon->ncounters; j++)
+ write_to_buffer_32(data, offset + j,
+ perfmon->values[j]);
+ }
v3d_perfmon_put(perfmon);
}
-
- for (int i = 0; i < performance_query->ncounters; i++)
- write_to_buffer(data, i, copy->do_64bit, counter_values[i]);
}
static void
@@ -626,7 +653,7 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
/* Unblock schedulers and restart their jobs. */
for (q = 0; q < V3D_MAX_QUEUES; q++) {
- drm_sched_start(&v3d->queue[q].sched, true);
+ drm_sched_start(&v3d->queue[q].sched);
}
mutex_unlock(&v3d->reset_lock);
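[Editor's note: the rewritten v3d_write_performance_query_result() above drops the on-stack u64 counter_values[V3D_MAX_COUNTERS] bounce buffer; each perfmon's values are written straight into the destination at a stride of DRM_V3D_MAX_PERF_COUNTERS through the typed write_to_buffer_32()/write_to_buffer_64() helpers. A minimal user-space sketch of that addressing scheme, with illustrative names only:]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_PERF_COUNTERS 4	/* stands in for DRM_V3D_MAX_PERF_COUNTERS */

static void write_u32(uint32_t *dst, unsigned int idx, uint32_t value)
{
	dst[idx] = value;
}

static void write_u64(uint64_t *dst, unsigned int idx, uint64_t value)
{
	dst[idx] = value;
}

/*
 * Write each group's counters at group * stride + j, in the width the
 * caller asked for, without staging them in a bounce buffer first.
 */
static void write_results(void *data, int do_64bit,
			  const uint64_t values[][MAX_PERF_COUNTERS],
			  unsigned int ngroups, unsigned int ncounters)
{
	unsigned int i, j, offset;

	for (i = 0, offset = 0; i < ngroups; i++, offset += MAX_PERF_COUNTERS) {
		for (j = 0; j < ncounters; j++) {
			if (do_64bit)
				write_u64(data, offset + j, values[i][j]);
			else
				write_u32(data, offset + j, (uint32_t)values[i][j]);
		}
	}
}

int main(void)
{
	const uint64_t values[2][MAX_PERF_COUNTERS] = {
		{ 1, 2, 3, 4 }, { 5, 6, 7, 8 },
	};
	uint64_t out[2 * MAX_PERF_COUNTERS];

	memset(out, 0, sizeof(out));
	write_results(out, 1, values, 2, 3);
	printf("group 1, counter 0 -> %llu\n",
	       (unsigned long long)out[MAX_PERF_COUNTERS]);
	return 0;
}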
diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c
index 88f63d526b22..d607aa9c4ec2 100644
--- a/drivers/gpu/drm/v3d/v3d_submit.c
+++ b/drivers/gpu/drm/v3d/v3d_submit.c
@@ -452,6 +452,9 @@ v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
{
u32 __user *offsets, *syncs;
struct drm_v3d_timestamp_query timestamp;
+ struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
+ unsigned int i;
+ int err;
if (!job) {
DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
@@ -471,35 +474,44 @@ v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
job->job_type = V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY;
- job->timestamp_query.queries = kvmalloc_array(timestamp.count,
- sizeof(struct v3d_timestamp_query),
- GFP_KERNEL);
- if (!job->timestamp_query.queries)
+ query_info->queries = kvmalloc_array(timestamp.count,
+ sizeof(struct v3d_timestamp_query),
+ GFP_KERNEL);
+ if (!query_info->queries)
return -ENOMEM;
offsets = u64_to_user_ptr(timestamp.offsets);
syncs = u64_to_user_ptr(timestamp.syncs);
- for (int i = 0; i < timestamp.count; i++) {
+ for (i = 0; i < timestamp.count; i++) {
u32 offset, sync;
- if (copy_from_user(&offset, offsets++, sizeof(offset))) {
- kvfree(job->timestamp_query.queries);
- return -EFAULT;
+ if (get_user(offset, offsets++)) {
+ err = -EFAULT;
+ goto error;
}
- job->timestamp_query.queries[i].offset = offset;
+ query_info->queries[i].offset = offset;
- if (copy_from_user(&sync, syncs++, sizeof(sync))) {
- kvfree(job->timestamp_query.queries);
- return -EFAULT;
+ if (get_user(sync, syncs++)) {
+ err = -EFAULT;
+ goto error;
}
- job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+ query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
+ sync);
+ if (!query_info->queries[i].syncobj) {
+ err = -ENOENT;
+ goto error;
+ }
}
- job->timestamp_query.count = timestamp.count;
+ query_info->count = timestamp.count;
return 0;
+
+error:
+ v3d_timestamp_query_info_free(&job->timestamp_query, i);
+ return err;
}
static int
@@ -509,6 +521,9 @@ v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
{
u32 __user *syncs;
struct drm_v3d_reset_timestamp_query reset;
+ struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
+ unsigned int i;
+ int err;
if (!job) {
DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
@@ -525,29 +540,38 @@ v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
job->job_type = V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY;
- job->timestamp_query.queries = kvmalloc_array(reset.count,
- sizeof(struct v3d_timestamp_query),
- GFP_KERNEL);
- if (!job->timestamp_query.queries)
+ query_info->queries = kvmalloc_array(reset.count,
+ sizeof(struct v3d_timestamp_query),
+ GFP_KERNEL);
+ if (!query_info->queries)
return -ENOMEM;
syncs = u64_to_user_ptr(reset.syncs);
- for (int i = 0; i < reset.count; i++) {
+ for (i = 0; i < reset.count; i++) {
u32 sync;
- job->timestamp_query.queries[i].offset = reset.offset + 8 * i;
+ query_info->queries[i].offset = reset.offset + 8 * i;
- if (copy_from_user(&sync, syncs++, sizeof(sync))) {
- kvfree(job->timestamp_query.queries);
- return -EFAULT;
+ if (get_user(sync, syncs++)) {
+ err = -EFAULT;
+ goto error;
}
- job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+ query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
+ sync);
+ if (!query_info->queries[i].syncobj) {
+ err = -ENOENT;
+ goto error;
+ }
}
- job->timestamp_query.count = reset.count;
+ query_info->count = reset.count;
return 0;
+
+error:
+ v3d_timestamp_query_info_free(&job->timestamp_query, i);
+ return err;
}
/* Get data for the copy timestamp query results job submission. */
@@ -558,7 +582,9 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
{
u32 __user *offsets, *syncs;
struct drm_v3d_copy_timestamp_query copy;
- int i;
+ struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
+ unsigned int i;
+ int err;
if (!job) {
DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
@@ -578,10 +604,10 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
job->job_type = V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY;
- job->timestamp_query.queries = kvmalloc_array(copy.count,
- sizeof(struct v3d_timestamp_query),
- GFP_KERNEL);
- if (!job->timestamp_query.queries)
+ query_info->queries = kvmalloc_array(copy.count,
+ sizeof(struct v3d_timestamp_query),
+ GFP_KERNEL);
+ if (!query_info->queries)
return -ENOMEM;
offsets = u64_to_user_ptr(copy.offsets);
@@ -590,21 +616,26 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
for (i = 0; i < copy.count; i++) {
u32 offset, sync;
- if (copy_from_user(&offset, offsets++, sizeof(offset))) {
- kvfree(job->timestamp_query.queries);
- return -EFAULT;
+ if (get_user(offset, offsets++)) {
+ err = -EFAULT;
+ goto error;
}
- job->timestamp_query.queries[i].offset = offset;
+ query_info->queries[i].offset = offset;
- if (copy_from_user(&sync, syncs++, sizeof(sync))) {
- kvfree(job->timestamp_query.queries);
- return -EFAULT;
+ if (get_user(sync, syncs++)) {
+ err = -EFAULT;
+ goto error;
}
- job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+ query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
+ sync);
+ if (!query_info->queries[i].syncobj) {
+ err = -ENOENT;
+ goto error;
+ }
}
- job->timestamp_query.count = copy.count;
+ query_info->count = copy.count;
job->copy.do_64bit = copy.do_64bit;
job->copy.do_partial = copy.do_partial;
@@ -613,6 +644,73 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
job->copy.stride = copy.stride;
return 0;
+
+error:
+ v3d_timestamp_query_info_free(&job->timestamp_query, i);
+ return err;
+}
+
+static int
+v3d_copy_query_info(struct v3d_performance_query_info *query_info,
+ unsigned int count,
+ unsigned int nperfmons,
+ u32 __user *syncs,
+ u64 __user *kperfmon_ids,
+ struct drm_file *file_priv)
+{
+ unsigned int i, j;
+ int err;
+
+ for (i = 0; i < count; i++) {
+ struct v3d_performance_query *query = &query_info->queries[i];
+ u32 __user *ids_pointer;
+ u32 sync, id;
+ u64 ids;
+
+ if (get_user(sync, syncs++)) {
+ err = -EFAULT;
+ goto error;
+ }
+
+ if (get_user(ids, kperfmon_ids++)) {
+ err = -EFAULT;
+ goto error;
+ }
+
+ query->kperfmon_ids =
+ kvmalloc_array(nperfmons,
+ sizeof(struct v3d_performance_query *),
+ GFP_KERNEL);
+ if (!query->kperfmon_ids) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ ids_pointer = u64_to_user_ptr(ids);
+
+ for (j = 0; j < nperfmons; j++) {
+ if (get_user(id, ids_pointer++)) {
+ kvfree(query->kperfmon_ids);
+ err = -EFAULT;
+ goto error;
+ }
+
+ query->kperfmon_ids[j] = id;
+ }
+
+ query->syncobj = drm_syncobj_find(file_priv, sync);
+ if (!query->syncobj) {
+ kvfree(query->kperfmon_ids);
+ err = -ENOENT;
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ v3d_performance_query_info_free(query_info, i);
+ return err;
}
static int
@@ -620,9 +718,9 @@ v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
struct drm_v3d_extension __user *ext,
struct v3d_cpu_job *job)
{
- u32 __user *syncs;
- u64 __user *kperfmon_ids;
+ struct v3d_performance_query_info *query_info = &job->performance_query;
struct drm_v3d_reset_performance_query reset;
+ int err;
if (!job) {
DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
@@ -639,46 +737,24 @@ v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY;
- job->performance_query.queries = kvmalloc_array(reset.count,
- sizeof(struct v3d_performance_query),
- GFP_KERNEL);
- if (!job->performance_query.queries)
+ query_info->queries =
+ kvmalloc_array(reset.count,
+ sizeof(struct v3d_performance_query),
+ GFP_KERNEL);
+ if (!query_info->queries)
return -ENOMEM;
- syncs = u64_to_user_ptr(reset.syncs);
- kperfmon_ids = u64_to_user_ptr(reset.kperfmon_ids);
-
- for (int i = 0; i < reset.count; i++) {
- u32 sync;
- u64 ids;
- u32 __user *ids_pointer;
- u32 id;
-
- if (copy_from_user(&sync, syncs++, sizeof(sync))) {
- kvfree(job->performance_query.queries);
- return -EFAULT;
- }
+ err = v3d_copy_query_info(query_info,
+ reset.count,
+ reset.nperfmons,
+ u64_to_user_ptr(reset.syncs),
+ u64_to_user_ptr(reset.kperfmon_ids),
+ file_priv);
+ if (err)
+ return err;
- job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
-
- if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
- kvfree(job->performance_query.queries);
- return -EFAULT;
- }
-
- ids_pointer = u64_to_user_ptr(ids);
-
- for (int j = 0; j < reset.nperfmons; j++) {
- if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
- kvfree(job->performance_query.queries);
- return -EFAULT;
- }
-
- job->performance_query.queries[i].kperfmon_ids[j] = id;
- }
- }
- job->performance_query.count = reset.count;
- job->performance_query.nperfmons = reset.nperfmons;
+ query_info->count = reset.count;
+ query_info->nperfmons = reset.nperfmons;
return 0;
}
@@ -688,9 +764,9 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
struct drm_v3d_extension __user *ext,
struct v3d_cpu_job *job)
{
- u32 __user *syncs;
- u64 __user *kperfmon_ids;
+ struct v3d_performance_query_info *query_info = &job->performance_query;
struct drm_v3d_copy_performance_query copy;
+ int err;
if (!job) {
DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
@@ -710,47 +786,25 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
job->job_type = V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY;
- job->performance_query.queries = kvmalloc_array(copy.count,
- sizeof(struct v3d_performance_query),
- GFP_KERNEL);
- if (!job->performance_query.queries)
+ query_info->queries =
+ kvmalloc_array(copy.count,
+ sizeof(struct v3d_performance_query),
+ GFP_KERNEL);
+ if (!query_info->queries)
return -ENOMEM;
- syncs = u64_to_user_ptr(copy.syncs);
- kperfmon_ids = u64_to_user_ptr(copy.kperfmon_ids);
-
- for (int i = 0; i < copy.count; i++) {
- u32 sync;
- u64 ids;
- u32 __user *ids_pointer;
- u32 id;
-
- if (copy_from_user(&sync, syncs++, sizeof(sync))) {
- kvfree(job->performance_query.queries);
- return -EFAULT;
- }
-
- job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
-
- if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
- kvfree(job->performance_query.queries);
- return -EFAULT;
- }
-
- ids_pointer = u64_to_user_ptr(ids);
-
- for (int j = 0; j < copy.nperfmons; j++) {
- if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
- kvfree(job->performance_query.queries);
- return -EFAULT;
- }
-
- job->performance_query.queries[i].kperfmon_ids[j] = id;
- }
- }
- job->performance_query.count = copy.count;
- job->performance_query.nperfmons = copy.nperfmons;
- job->performance_query.ncounters = copy.ncounters;
+ err = v3d_copy_query_info(query_info,
+ copy.count,
+ copy.nperfmons,
+ u64_to_user_ptr(copy.syncs),
+ u64_to_user_ptr(copy.kperfmon_ids),
+ file_priv);
+ if (err)
+ return err;
+
+ query_info->count = copy.count;
+ query_info->nperfmons = copy.nperfmons;
+ query_info->ncounters = copy.ncounters;
job->copy.do_64bit = copy.do_64bit;
job->copy.do_partial = copy.do_partial;
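[Editor's note: the submit-path rework above switches the per-element copies to get_user(), checks the drm_syncobj_find() result, and unwinds through a single error label that frees only the i entries populated so far (the new v3d_timestamp_query_info_free()/v3d_performance_query_info_free() helpers take that count). A stand-alone sketch of the same unwinding shape, with plain malloc standing in for the syncobj and kperfmon lookups, illustrative only:]

#include <stdlib.h>
#include <stdio.h>

struct query {
	int *payload;	/* stands in for syncobj / kperfmon_ids */
};

static void queries_free(struct query *queries, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		free(queries[i].payload);
	free(queries);
}

static int queries_init(struct query **out, unsigned int count)
{
	struct query *queries;
	unsigned int i;
	int err;

	queries = calloc(count, sizeof(*queries));
	if (!queries)
		return -1;	/* -ENOMEM in the driver */

	for (i = 0; i < count; i++) {
		queries[i].payload = malloc(sizeof(int));
		if (!queries[i].payload) {
			err = -1;	/* -EFAULT / -ENOENT in the driver */
			goto error;
		}
	}

	*out = queries;
	return 0;

error:
	/* Free only the i fully-initialized entries, then the array itself. */
	queries_free(queries, i);
	return err;
}

int main(void)
{
	struct query *queries;

	if (!queries_init(&queries, 4)) {
		puts("allocated 4 queries");
		queries_free(queries, 4);
	}
	return 0;
}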
diff --git a/drivers/gpu/drm/virtio/virtgpu_submit.c b/drivers/gpu/drm/virtio/virtgpu_submit.c
index 1c7c7f61a222..7d34cf83f5f2 100644
--- a/drivers/gpu/drm/virtio/virtgpu_submit.c
+++ b/drivers/gpu/drm/virtio/virtgpu_submit.c
@@ -48,7 +48,7 @@ struct virtio_gpu_submit {
static int virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit,
struct dma_fence *in_fence)
{
- u32 context = submit->fence_ctx + submit->ring_idx;
+ u64 context = submit->fence_ctx + submit->ring_idx;
if (dma_fence_match_context(in_fence, context))
return 0;
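[Editor's note: the virtio-gpu fix above widens the fence-context variable to u64. dma_fence contexts are 64-bit values, so storing fence_ctx + ring_idx in a u32 silently truncates and can make dma_fence_match_context() compare against the wrong context. A trivial demonstration of the truncation hazard, plain C with no DRM dependency:]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t fence_ctx = 0x100000000ULL;	/* a valid 64-bit fence context */
	uint32_t ring_idx = 2;

	uint32_t truncated = (uint32_t)(fence_ctx + ring_idx);	/* old u32 variable */
	uint64_t correct = fence_ctx + ring_idx;		/* new u64 variable */

	/* truncated == 2: it would "match" an unrelated context 2. */
	printf("truncated: %u, correct: %llu\n",
	       truncated, (unsigned long long)correct);
	return 0;
}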
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 8f5710debb1e..5e46ea5b96dc 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -103,7 +103,6 @@ struct vkms_output {
struct drm_writeback_connector wb_connector;
struct hrtimer vblank_hrtimer;
ktime_t period_ns;
- struct drm_pending_vblank_event *event;
/* ordered wq for composer_work */
struct workqueue_struct *composer_workq;
/* protects concurrent access to composer */
diff --git a/drivers/gpu/drm/vkms/vkms_formats.c b/drivers/gpu/drm/vkms/vkms_formats.c
index 36046b12f296..040b7f113a3b 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.c
+++ b/drivers/gpu/drm/vkms/vkms_formats.c
@@ -75,7 +75,7 @@ static void XRGB8888_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixe
static void ARGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
{
- u16 *pixels = (u16 *)src_pixels;
+ __le16 *pixels = (__force __le16 *)src_pixels;
out_pixel->a = le16_to_cpu(pixels[3]);
out_pixel->r = le16_to_cpu(pixels[2]);
@@ -85,7 +85,7 @@ static void ARGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_
static void XRGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
{
- u16 *pixels = (u16 *)src_pixels;
+ __le16 *pixels = (__force __le16 *)src_pixels;
out_pixel->a = (u16)0xffff;
out_pixel->r = le16_to_cpu(pixels[2]);
@@ -95,7 +95,7 @@ static void XRGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_
static void RGB565_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
{
- u16 *pixels = (u16 *)src_pixels;
+ __le16 *pixels = (__force __le16 *)src_pixels;
s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));
@@ -178,7 +178,7 @@ static void argb_u16_to_XRGB8888(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel
static void argb_u16_to_ARGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
{
- u16 *pixels = (u16 *)dst_pixels;
+ __le16 *pixels = (__force __le16 *)dst_pixels;
pixels[3] = cpu_to_le16(in_pixel->a);
pixels[2] = cpu_to_le16(in_pixel->r);
@@ -188,9 +188,9 @@ static void argb_u16_to_ARGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_p
static void argb_u16_to_XRGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
{
- u16 *pixels = (u16 *)dst_pixels;
+ __le16 *pixels = (__force __le16 *)dst_pixels;
- pixels[3] = 0xffff;
+ pixels[3] = cpu_to_le16(0xffff);
pixels[2] = cpu_to_le16(in_pixel->r);
pixels[1] = cpu_to_le16(in_pixel->g);
pixels[0] = cpu_to_le16(in_pixel->b);
@@ -198,7 +198,7 @@ static void argb_u16_to_XRGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_p
static void argb_u16_to_RGB565(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
{
- u16 *pixels = (u16 *)dst_pixels;
+ __le16 *pixels = (__force __le16 *)dst_pixels;
s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));
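[Editor's note: the vkms conversions above re-type the pixel pointers as __le16 and wrap constants such as 0xffff in cpu_to_le16(), so sparse can verify that every access to the little-endian framebuffer goes through a byte-order conversion. Outside the kernel the same guarantee can be had by packing bytes explicitly; a small sketch, plain C rather than the vkms code:]

#include <stdint.h>
#include <stdio.h>

/*
 * Store a 16-bit value into a buffer in little-endian order regardless of
 * host endianness -- a user-space analogue of cpu_to_le16().
 */
static void put_le16(uint8_t *dst, uint16_t value)
{
	dst[0] = value & 0xff;
	dst[1] = value >> 8;
}

static uint16_t get_le16(const uint8_t *src)
{
	return (uint16_t)(src[0] | (src[1] << 8));
}

int main(void)
{
	uint8_t pixel[8];	/* one 16-bit-per-channel pixel: b, g, r, x */

	put_le16(&pixel[0], 0x1111);	/* blue  */
	put_le16(&pixel[2], 0x2222);	/* green */
	put_le16(&pixel[4], 0x3333);	/* red   */
	put_le16(&pixel[6], 0xffff);	/* padding, like the 0xffff fix above */

	printf("red channel: 0x%04x\n", get_le16(&pixel[4]));
	return 0;
}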
diff --git a/drivers/gpu/drm/vmwgfx/vmw_surface_cache.h b/drivers/gpu/drm/vmwgfx/vmw_surface_cache.h
index b0d87c5f58d8..1ac3cb151b11 100644
--- a/drivers/gpu/drm/vmwgfx/vmw_surface_cache.h
+++ b/drivers/gpu/drm/vmwgfx/vmw_surface_cache.h
@@ -1,6 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2021 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
+ *
+ * Copyright (c) 2021-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -31,6 +33,10 @@
#include <drm/vmwgfx_drm.h>
+#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) ((svga3d_flags) >> 32)
+#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
+ ((svga3d_flags) & ((uint64_t)U32_MAX))
+
static inline u32 clamped_umul32(u32 a, u32 b)
{
uint64_t tmp = (uint64_t) a*b;
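[Editor's note: the two macros added above split a 64-bit SVGA3D flags word into halves that fit 32-bit command fields. A quick round-trip check of that split in plain C, with the U32_MAX mask written out since kernel headers are not available here:]

#include <stdint.h>
#include <stdio.h>

#define FLAGS_UPPER_32(f) ((f) >> 32)
#define FLAGS_LOWER_32(f) ((f) & 0xffffffffULL)

int main(void)
{
	uint64_t flags = 0x0123456789abcdefULL;
	uint32_t hi = (uint32_t)FLAGS_UPPER_32(flags);
	uint32_t lo = (uint32_t)FLAGS_LOWER_32(flags);
	uint64_t back = ((uint64_t)hi << 32) | lo;

	printf("upper: 0x%08x lower: 0x%08x roundtrip ok: %d\n",
	       hi, lo, back == flags);
	return 0;
}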
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 00144632c600..f42ebc4a7c22 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2011-2023 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright (c) 2011-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -28,15 +28,39 @@
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
-
+#include "vmwgfx_resource_priv.h"
#include <drm/ttm/ttm_placement.h>
static void vmw_bo_release(struct vmw_bo *vbo)
{
+ struct vmw_resource *res;
+
WARN_ON(vbo->tbo.base.funcs &&
kref_read(&vbo->tbo.base.refcount) != 0);
vmw_bo_unmap(vbo);
+
+ xa_destroy(&vbo->detached_resources);
+ WARN_ON(vbo->is_dumb && !vbo->dumb_surface);
+ if (vbo->is_dumb && vbo->dumb_surface) {
+ res = &vbo->dumb_surface->res;
+ WARN_ON(vbo != res->guest_memory_bo);
+ WARN_ON(!res->guest_memory_bo);
+ if (res->guest_memory_bo) {
+ /* Reserve and switch the backing mob. */
+ mutex_lock(&res->dev_priv->cmdbuf_mutex);
+ (void)vmw_resource_reserve(res, false, true);
+ vmw_resource_mob_detach(res);
+ if (res->coherent)
+ vmw_bo_dirty_release(res->guest_memory_bo);
+ res->guest_memory_bo = NULL;
+ res->guest_memory_offset = 0;
+ vmw_resource_unreserve(res, false, false, false, NULL,
+ 0);
+ mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+ }
+ vmw_surface_unreference(&vbo->dumb_surface);
+ }
drm_gem_object_release(&vbo->tbo.base);
}
@@ -326,6 +350,11 @@ void vmw_bo_pin_reserved(struct vmw_bo *vbo, bool pin)
*/
void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
{
+ return vmw_bo_map_and_cache_size(vbo, vbo->tbo.base.size);
+}
+
+void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size)
+{
struct ttm_buffer_object *bo = &vbo->tbo;
bool not_used;
void *virtual;
@@ -335,9 +364,10 @@ void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
if (virtual)
return virtual;
- ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->base.size), &vbo->map);
+ ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vbo->map);
if (ret)
- DRM_ERROR("Buffer object map failed: %d.\n", ret);
+ DRM_ERROR("Buffer object map failed: %d (size: bo = %zu, map = %zu).\n",
+ ret, bo->base.size, size);
return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}
@@ -390,6 +420,7 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
vmw_bo->tbo.priority = 3;
vmw_bo->res_tree = RB_ROOT;
+ xa_init(&vmw_bo->detached_resources);
params->size = ALIGN(params->size, PAGE_SIZE);
drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);
@@ -654,52 +685,6 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
dma_fence_put(&fence->base);
}
-
-/**
- * vmw_dumb_create - Create a dumb kms buffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @args: Pointer to a struct drm_mode_create_dumb structure
- * Return: Zero on success, negative error code on failure.
- *
- * This is a driver callback for the core drm create_dumb functionality.
- * Note that this is very similar to the vmw_bo_alloc ioctl, except
- * that the arguments have a different format.
- */
-int vmw_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_bo *vbo;
- int cpp = DIV_ROUND_UP(args->bpp, 8);
- int ret;
-
- switch (cpp) {
- case 1: /* DRM_FORMAT_C8 */
- case 2: /* DRM_FORMAT_RGB565 */
- case 4: /* DRM_FORMAT_XRGB8888 */
- break;
- default:
- /*
- * Dumb buffers don't allow anything else.
- * This is tested via IGT's dumb_buffers
- */
- return -EINVAL;
- }
-
- args->pitch = args->width * cpp;
- args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);
-
- ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
- args->size, &args->handle,
- &vbo);
- /* drop reference from allocate - handle holds it now */
- drm_gem_object_put(&vbo->tbo.base);
- return ret;
-}
-
/**
* vmw_bo_swap_notify - swapout notify callback.
*
@@ -853,3 +838,43 @@ void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
vmw_bo_placement_set(bo, domain, domain);
}
+
+void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
+{
+ xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL);
+}
+
+void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
+{
+ xa_erase(&vbo->detached_resources, (unsigned long)res);
+}
+
+struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo)
+{
+ unsigned long index;
+ struct vmw_resource *res = NULL;
+ struct vmw_surface *surf = NULL;
+ struct rb_node *rb_itr = vbo->res_tree.rb_node;
+
+ if (vbo->is_dumb && vbo->dumb_surface) {
+ res = &vbo->dumb_surface->res;
+ goto out;
+ }
+
+ xa_for_each(&vbo->detached_resources, index, res) {
+ if (res->func->res_type == vmw_res_surface)
+ goto out;
+ }
+
+ for (rb_itr = rb_first(&vbo->res_tree); rb_itr;
+ rb_itr = rb_next(rb_itr)) {
+ res = rb_entry(rb_itr, struct vmw_resource, mob_node);
+ if (res->func->res_type == vmw_res_surface)
+ goto out;
+ }
+
+out:
+ if (res)
+ surf = vmw_res_to_srf(res);
+ return surf;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index f349642e6190..62b4342d5f7c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright 2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2023-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -35,11 +36,13 @@
#include <linux/rbtree_types.h>
#include <linux/types.h>
+#include <linux/xarray.h>
struct vmw_bo_dirty;
struct vmw_fence_obj;
struct vmw_private;
struct vmw_resource;
+struct vmw_surface;
enum vmw_bo_domain {
VMW_BO_DOMAIN_SYS = BIT(0),
@@ -85,11 +88,15 @@ struct vmw_bo {
struct rb_root res_tree;
u32 res_prios[TTM_MAX_BO_PRIORITY];
+ struct xarray detached_resources;
atomic_t cpu_writers;
/* Not ref-counted. Protected by binding_mutex */
struct vmw_resource *dx_query_ctx;
struct vmw_bo_dirty *dirty;
+
+ bool is_dumb;
+ struct vmw_surface *dumb_surface;
};
void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain);
@@ -124,15 +131,21 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence);
void *vmw_bo_map_and_cache(struct vmw_bo *vbo);
+void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size);
void vmw_bo_unmap(struct vmw_bo *vbo);
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *mem);
void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
+void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
+void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
+struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo);
+
int vmw_user_bo_lookup(struct drm_file *filp,
u32 handle,
struct vmw_bo **out);
+
/**
* vmw_bo_adjust_prio - Adjust the buffer object eviction priority
* according to attached resources
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index a1ce41e1c468..32f50e595809 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -763,6 +764,26 @@ extern int vmw_gmr_bind(struct vmw_private *dev_priv,
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
/**
+ * User handles
+ */
+struct vmw_user_object {
+ struct vmw_surface *surface;
+ struct vmw_bo *buffer;
+};
+
+int vmw_user_object_lookup(struct vmw_private *dev_priv, struct drm_file *filp,
+ u32 handle, struct vmw_user_object *uo);
+struct vmw_user_object *vmw_user_object_ref(struct vmw_user_object *uo);
+void vmw_user_object_unref(struct vmw_user_object *uo);
+bool vmw_user_object_is_null(struct vmw_user_object *uo);
+struct vmw_surface *vmw_user_object_surface(struct vmw_user_object *uo);
+struct vmw_bo *vmw_user_object_buffer(struct vmw_user_object *uo);
+void *vmw_user_object_map(struct vmw_user_object *uo);
+void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size);
+void vmw_user_object_unmap(struct vmw_user_object *uo);
+bool vmw_user_object_is_mapped(struct vmw_user_object *uo);
+
+/**
* Resource utilities - vmwgfx_resource.c
*/
struct vmw_user_resource_conv;
@@ -776,11 +797,6 @@ extern int vmw_resource_validate(struct vmw_resource *res, bool intr,
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
-extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
- struct drm_file *filp,
- uint32_t handle,
- struct vmw_surface **out_surf,
- struct vmw_bo **out_buf);
extern int vmw_user_resource_lookup_handle(
struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
@@ -1057,9 +1073,6 @@ int vmw_kms_suspend(struct drm_device *dev);
int vmw_kms_resume(struct drm_device *dev);
void vmw_kms_lost_device(struct drm_device *dev);
-int vmw_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args);
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
@@ -1176,6 +1189,15 @@ extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
int vmw_gb_surface_define(struct vmw_private *dev_priv,
const struct vmw_surface_metadata *req,
struct vmw_surface **srf_out);
+struct vmw_surface *vmw_lookup_surface_for_buffer(struct vmw_private *vmw,
+ struct vmw_bo *bo,
+ u32 handle);
+u32 vmw_lookup_surface_handle_for_buffer(struct vmw_private *vmw,
+ struct vmw_bo *bo,
+ u32 handle);
+int vmw_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
/*
* Shader management - vmwgfx_shader.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 5efc6a766f64..588d50ababf6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -32,7 +32,6 @@
#define VMW_FENCE_WRAP (1 << 31)
struct vmw_fence_manager {
- int num_fence_objects;
struct vmw_private *dev_priv;
spinlock_t lock;
struct list_head fence_list;
@@ -124,13 +123,13 @@ static void vmw_fence_obj_destroy(struct dma_fence *f)
{
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
-
struct vmw_fence_manager *fman = fman_from_fence(fence);
- spin_lock(&fman->lock);
- list_del_init(&fence->head);
- --fman->num_fence_objects;
- spin_unlock(&fman->lock);
+ if (!list_empty(&fence->head)) {
+ spin_lock(&fman->lock);
+ list_del_init(&fence->head);
+ spin_unlock(&fman->lock);
+ }
fence->destroy(fence);
}
@@ -257,7 +256,6 @@ static const struct dma_fence_ops vmw_fence_ops = {
.release = vmw_fence_obj_destroy,
};
-
/*
* Execute signal actions on fences recently signaled.
* This is done from a workqueue so we don't have to execute
@@ -355,7 +353,6 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
goto out_unlock;
}
list_add_tail(&fence->head, &fman->fence_list);
- ++fman->num_fence_objects;
out_unlock:
spin_unlock(&fman->lock);
@@ -403,7 +400,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
u32 passed_seqno)
{
u32 goal_seqno;
- struct vmw_fence_obj *fence;
+ struct vmw_fence_obj *fence, *next_fence;
if (likely(!fman->seqno_valid))
return false;
@@ -413,7 +410,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
return false;
fman->seqno_valid = false;
- list_for_each_entry(fence, &fman->fence_list, head) {
+ list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
if (!list_empty(&fence->seq_passed_actions)) {
fman->seqno_valid = true;
vmw_fence_goal_write(fman->dev_priv,
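[Editor's note: the fence-manager change above walks fman->fence_list with list_for_each_entry_safe() because entries may be unlinked while the list is being traversed; the safe variant captures the next node before the current one can go away. The same hazard shown on a minimal singly-linked list, plain C rather than kernel list.h:]

#include <stdlib.h>
#include <stdio.h>

struct node {
	int value;
	struct node *next;
};

int main(void)
{
	struct node *head = NULL, *cur, *next;
	int i;

	/* Build a small list: 4 -> 3 -> 2 -> 1 -> 0. */
	for (i = 0; i < 5; i++) {
		cur = malloc(sizeof(*cur));
		cur->value = i;
		cur->next = head;
		head = cur;
	}

	/*
	 * "Safe" traversal: remember the next node before freeing the
	 * current one.  Reading cur->next after free(cur) -- the shape of
	 * the unsafe iterator -- would be a use-after-free.
	 */
	for (cur = head; cur; cur = next) {
		next = cur->next;
		printf("freeing %d\n", cur->value);
		free(cur);
	}
	return 0;
}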
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index 07185c108218..b9857f37ca1a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
- * Copyright 2021-2023 VMware, Inc.
+ * Copyright (c) 2021-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -78,6 +79,59 @@ static struct sg_table *vmw_gem_object_get_sg_table(struct drm_gem_object *obj)
return drm_prime_pages_to_sg(obj->dev, vmw_tt->dma_ttm.pages, vmw_tt->dma_ttm.num_pages);
}
+static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+ struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
+ int ret;
+
+ if (obj->import_attach) {
+ ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
+ if (!ret) {
+ if (drm_WARN_ON(obj->dev, map->is_iomem)) {
+ dma_buf_vunmap(obj->import_attach->dmabuf, map);
+ return -EIO;
+ }
+ }
+ } else {
+ ret = ttm_bo_vmap(bo, map);
+ }
+
+ return ret;
+}
+
+static void vmw_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+ if (obj->import_attach)
+ dma_buf_vunmap(obj->import_attach->dmabuf, map);
+ else
+ drm_gem_ttm_vunmap(obj, map);
+}
+
+static int vmw_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ int ret;
+
+ if (obj->import_attach) {
+ /*
+ * Reset both vm_ops and vm_private_data, so we don't end up with
+ * vm_ops pointing to our implementation if the dma-buf backend
+ * doesn't set those fields.
+ */
+ vma->vm_private_data = NULL;
+ vma->vm_ops = NULL;
+
+ ret = dma_buf_mmap(obj->dma_buf, vma, 0);
+
+ /* Drop the reference drm_gem_mmap_obj() acquired. */

+ if (!ret)
+ drm_gem_object_put(obj);
+
+ return ret;
+ }
+
+ return drm_gem_ttm_mmap(obj, vma);
+}
+
static const struct vm_operations_struct vmw_vm_ops = {
.pfn_mkwrite = vmw_bo_vm_mkwrite,
.page_mkwrite = vmw_bo_vm_mkwrite,
@@ -94,9 +148,9 @@ static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
.pin = vmw_gem_object_pin,
.unpin = vmw_gem_object_unpin,
.get_sg_table = vmw_gem_object_get_sg_table,
- .vmap = drm_gem_ttm_vmap,
- .vunmap = drm_gem_ttm_vunmap,
- .mmap = drm_gem_ttm_mmap,
+ .vmap = vmw_gem_vmap,
+ .vunmap = vmw_gem_vunmap,
+ .mmap = vmw_gem_mmap,
.vm_ops = &vmw_vm_ops,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 00c4ff684130..288ed0bb75cb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -193,13 +194,16 @@ static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
*/
static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
{
- if (vps->surf) {
- if (vps->surf_mapped)
- return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
- return vps->surf->snooper.image;
- } else if (vps->bo)
- return vmw_bo_map_and_cache(vps->bo);
- return NULL;
+ struct vmw_surface *surf;
+
+ if (vmw_user_object_is_null(&vps->uo))
+ return NULL;
+
+ surf = vmw_user_object_surface(&vps->uo);
+ if (surf && !vmw_user_object_is_mapped(&vps->uo))
+ return surf->snooper.image;
+
+ return vmw_user_object_map(&vps->uo);
}
static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
@@ -536,22 +540,16 @@ void vmw_du_primary_plane_destroy(struct drm_plane *plane)
* vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
*
* @vps: plane state associated with the display surface
- * @unreference: true if we also want to unreference the display.
*/
-void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
- bool unreference)
+void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps)
{
- if (vps->surf) {
+ struct vmw_surface *surf = vmw_user_object_surface(&vps->uo);
+
+ if (surf) {
if (vps->pinned) {
- vmw_resource_unpin(&vps->surf->res);
+ vmw_resource_unpin(&surf->res);
vps->pinned--;
}
-
- if (unreference) {
- if (vps->pinned)
- DRM_ERROR("Surface still pinned\n");
- vmw_surface_unreference(&vps->surf);
- }
}
}
@@ -572,7 +570,7 @@ vmw_du_plane_cleanup_fb(struct drm_plane *plane,
{
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
- vmw_du_plane_unpin_surf(vps, false);
+ vmw_du_plane_unpin_surf(vps);
}
@@ -661,25 +659,14 @@ vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
- if (vps->surf_mapped) {
- vmw_bo_unmap(vps->surf->res.guest_memory_bo);
- vps->surf_mapped = false;
- }
+ if (!vmw_user_object_is_null(&vps->uo))
+ vmw_user_object_unmap(&vps->uo);
vmw_du_cursor_plane_unmap_cm(vps);
vmw_du_put_cursor_mob(vcp, vps);
- vmw_du_plane_unpin_surf(vps, false);
-
- if (vps->surf) {
- vmw_surface_unreference(&vps->surf);
- vps->surf = NULL;
- }
-
- if (vps->bo) {
- vmw_bo_unreference(&vps->bo);
- vps->bo = NULL;
- }
+ vmw_du_plane_unpin_surf(vps);
+ vmw_user_object_unref(&vps->uo);
}
@@ -698,64 +685,48 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
struct drm_framebuffer *fb = new_state->fb;
struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ struct vmw_bo *bo = NULL;
int ret = 0;
- if (vps->surf) {
- if (vps->surf_mapped) {
- vmw_bo_unmap(vps->surf->res.guest_memory_bo);
- vps->surf_mapped = false;
- }
- vmw_surface_unreference(&vps->surf);
- vps->surf = NULL;
- }
-
- if (vps->bo) {
- vmw_bo_unreference(&vps->bo);
- vps->bo = NULL;
+ if (!vmw_user_object_is_null(&vps->uo)) {
+ vmw_user_object_unmap(&vps->uo);
+ vmw_user_object_unref(&vps->uo);
}
if (fb) {
if (vmw_framebuffer_to_vfb(fb)->bo) {
- vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
- vmw_bo_reference(vps->bo);
+ vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer;
+ vps->uo.surface = NULL;
} else {
- vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
- vmw_surface_reference(vps->surf);
+ memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo));
}
+ vmw_user_object_ref(&vps->uo);
}
- if (!vps->surf && vps->bo) {
- const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
+ bo = vmw_user_object_buffer(&vps->uo);
+ if (bo) {
+ struct ttm_operation_ctx ctx = {false, false};
- /*
- * Not using vmw_bo_map_and_cache() helper here as we need to
- * reserve the ttm_buffer_object first which
- * vmw_bo_map_and_cache() omits.
- */
- ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
-
- if (unlikely(ret != 0))
+ ret = ttm_bo_reserve(&bo->tbo, true, false, NULL);
+ if (ret != 0)
return -ENOMEM;
- ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map);
-
- ttm_bo_unreserve(&vps->bo->tbo);
-
- if (unlikely(ret != 0))
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret != 0)
return -ENOMEM;
- } else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) {
- WARN_ON(vps->surf->snooper.image);
- ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false,
- NULL);
- if (unlikely(ret != 0))
- return -ENOMEM;
- vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
- ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo);
- vps->surf_mapped = true;
+ vmw_bo_pin_reserved(bo, true);
+ if (vmw_framebuffer_to_vfb(fb)->bo) {
+ const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
+
+ (void)vmw_bo_map_and_cache_size(bo, size);
+ } else {
+ vmw_bo_map_and_cache(bo);
+ }
+ ttm_bo_unreserve(&bo->tbo);
}
- if (vps->surf || vps->bo) {
+ if (!vmw_user_object_is_null(&vps->uo)) {
vmw_du_get_cursor_mob(vcp, vps);
vmw_du_cursor_plane_map_cm(vps);
}
@@ -777,14 +748,17 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
+ struct vmw_bo *old_bo = NULL;
+ struct vmw_bo *new_bo = NULL;
s32 hotspot_x, hotspot_y;
+ int ret;
hotspot_x = du->hotspot_x + new_state->hotspot_x;
hotspot_y = du->hotspot_y + new_state->hotspot_y;
- du->cursor_surface = vps->surf;
+ du->cursor_surface = vmw_user_object_surface(&vps->uo);
- if (!vps->surf && !vps->bo) {
+ if (vmw_user_object_is_null(&vps->uo)) {
vmw_cursor_update_position(dev_priv, false, 0, 0);
return;
}
@@ -792,10 +766,26 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
vps->cursor.hotspot_x = hotspot_x;
vps->cursor.hotspot_y = hotspot_y;
- if (vps->surf) {
+ if (du->cursor_surface)
du->cursor_age = du->cursor_surface->snooper.age;
+
+ if (!vmw_user_object_is_null(&old_vps->uo)) {
+ old_bo = vmw_user_object_buffer(&old_vps->uo);
+ ret = ttm_bo_reserve(&old_bo->tbo, false, false, NULL);
+ if (ret != 0)
+ return;
}
+ if (!vmw_user_object_is_null(&vps->uo)) {
+ new_bo = vmw_user_object_buffer(&vps->uo);
+ if (old_bo != new_bo) {
+ ret = ttm_bo_reserve(&new_bo->tbo, false, false, NULL);
+ if (ret != 0)
+ return;
+ } else {
+ new_bo = NULL;
+ }
+ }
if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
/*
* If it hasn't changed, avoid making the device do extra
@@ -813,6 +803,11 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
hotspot_x, hotspot_y);
}
+ if (old_bo)
+ ttm_bo_unreserve(&old_bo->tbo);
+ if (new_bo)
+ ttm_bo_unreserve(&new_bo->tbo);
+
du->cursor_x = new_state->crtc_x + du->set_gui_x;
du->cursor_y = new_state->crtc_y + du->set_gui_y;
@@ -913,7 +908,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
}
if (!vmw_framebuffer_to_vfb(fb)->bo) {
- surface = vmw_framebuffer_to_vfbs(fb)->surface;
+ surface = vmw_user_object_surface(&vmw_framebuffer_to_vfbs(fb)->uo);
WARN_ON(!surface);
@@ -1074,12 +1069,7 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
memset(&vps->cursor, 0, sizeof(vps->cursor));
/* Each ref counted resource needs to be acquired again */
- if (vps->surf)
- (void) vmw_surface_reference(vps->surf);
-
- if (vps->bo)
- (void) vmw_bo_reference(vps->bo);
-
+ vmw_user_object_ref(&vps->uo);
state = &vps->base;
__drm_atomic_helper_plane_duplicate_state(plane, state);
@@ -1128,11 +1118,7 @@ vmw_du_plane_destroy_state(struct drm_plane *plane,
struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
/* Should have been freed by cleanup_fb */
- if (vps->surf)
- vmw_surface_unreference(&vps->surf);
-
- if (vps->bo)
- vmw_bo_unreference(&vps->bo);
+ vmw_user_object_unref(&vps->uo);
drm_atomic_helper_plane_destroy_state(plane, state);
}
@@ -1227,7 +1213,7 @@ static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
vmw_framebuffer_to_vfbs(framebuffer);
drm_framebuffer_cleanup(framebuffer);
- vmw_surface_unreference(&vfbs->surface);
+ vmw_user_object_unref(&vfbs->uo);
kfree(vfbs);
}
@@ -1272,29 +1258,41 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
return -ENOSYS;
}
+static int vmw_framebuffer_surface_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(fb);
+ struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);
+
+ return drm_gem_handle_create(file_priv, &bo->tbo.base, handle);
+}
static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
+ .create_handle = vmw_framebuffer_surface_create_handle,
.destroy = vmw_framebuffer_surface_destroy,
.dirty = drm_atomic_helper_dirtyfb,
};
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
- struct vmw_surface *surface,
+ struct vmw_user_object *uo,
struct vmw_framebuffer **out,
const struct drm_mode_fb_cmd2
- *mode_cmd,
- bool is_bo_proxy)
+ *mode_cmd)
{
struct drm_device *dev = &dev_priv->drm;
struct vmw_framebuffer_surface *vfbs;
enum SVGA3dSurfaceFormat format;
+ struct vmw_surface *surface;
int ret;
/* 3D is only supported on HWv8 and newer hosts */
if (dev_priv->active_display_unit == vmw_du_legacy)
return -ENOSYS;
+ surface = vmw_user_object_surface(uo);
+
/*
* Sanity checks.
*/
@@ -1357,8 +1355,8 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
}
drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
- vfbs->surface = vmw_surface_reference(surface);
- vfbs->is_bo_proxy = is_bo_proxy;
+ memcpy(&vfbs->uo, uo, sizeof(vfbs->uo));
+ vmw_user_object_ref(&vfbs->uo);
*out = &vfbs->base;
@@ -1370,7 +1368,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
return 0;
out_err2:
- vmw_surface_unreference(&surface);
+ vmw_user_object_unref(&vfbs->uo);
kfree(vfbs);
out_err1:
return ret;
@@ -1386,7 +1384,6 @@ static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
{
struct vmw_framebuffer_bo *vfbd =
vmw_framebuffer_to_vfbd(fb);
-
return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
}
@@ -1407,86 +1404,6 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
.dirty = drm_atomic_helper_dirtyfb,
};
-/**
- * vmw_create_bo_proxy - create a proxy surface for the buffer object
- *
- * @dev: DRM device
- * @mode_cmd: parameters for the new surface
- * @bo_mob: MOB backing the buffer object
- * @srf_out: newly created surface
- *
- * When the content FB is a buffer object, we create a surface as a proxy to the
- * same buffer. This way we can do a surface copy rather than a surface DMA.
- * This is a more efficient approach
- *
- * RETURNS:
- * 0 on success, error code otherwise
- */
-static int vmw_create_bo_proxy(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct vmw_bo *bo_mob,
- struct vmw_surface **srf_out)
-{
- struct vmw_surface_metadata metadata = {0};
- uint32_t format;
- struct vmw_resource *res;
- unsigned int bytes_pp;
- int ret;
-
- switch (mode_cmd->pixel_format) {
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_XRGB8888:
- format = SVGA3D_X8R8G8B8;
- bytes_pp = 4;
- break;
-
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB1555:
- format = SVGA3D_R5G6B5;
- bytes_pp = 2;
- break;
-
- case 8:
- format = SVGA3D_P8;
- bytes_pp = 1;
- break;
-
- default:
- DRM_ERROR("Invalid framebuffer format %p4cc\n",
- &mode_cmd->pixel_format);
- return -EINVAL;
- }
-
- metadata.format = format;
- metadata.mip_levels[0] = 1;
- metadata.num_sizes = 1;
- metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
- metadata.base_size.height = mode_cmd->height;
- metadata.base_size.depth = 1;
- metadata.scanout = true;
-
- ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
- if (ret) {
- DRM_ERROR("Failed to allocate proxy content buffer\n");
- return ret;
- }
-
- res = &(*srf_out)->res;
-
- /* Reserve and switch the backing mob. */
- mutex_lock(&res->dev_priv->cmdbuf_mutex);
- (void) vmw_resource_reserve(res, false, true);
- vmw_user_bo_unref(&res->guest_memory_bo);
- res->guest_memory_bo = vmw_user_bo_ref(bo_mob);
- res->guest_memory_offset = 0;
- vmw_resource_unreserve(res, false, false, false, NULL, 0);
- mutex_unlock(&res->dev_priv->cmdbuf_mutex);
-
- return 0;
-}
-
-
-
static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
struct vmw_bo *bo,
struct vmw_framebuffer **out,
@@ -1565,55 +1482,24 @@ vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
* vmw_kms_new_framebuffer - Create a new framebuffer.
*
* @dev_priv: Pointer to device private struct.
- * @bo: Pointer to buffer object to wrap the kms framebuffer around.
- * Either @bo or @surface must be NULL.
- * @surface: Pointer to a surface to wrap the kms framebuffer around.
- * Either @bo or @surface must be NULL.
- * @only_2d: No presents will occur to this buffer object based framebuffer.
- * This helps the code to do some important optimizations.
+ * @uo: Pointer to user object to wrap the kms framebuffer around.
+ * Either the buffer or surface inside the user object must be NULL.
* @mode_cmd: Frame-buffer metadata.
*/
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
- struct vmw_bo *bo,
- struct vmw_surface *surface,
- bool only_2d,
+ struct vmw_user_object *uo,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vmw_framebuffer *vfb = NULL;
- bool is_bo_proxy = false;
int ret;
- /*
- * We cannot use the SurfaceDMA command in an non-accelerated VM,
- * therefore, wrap the buffer object in a surface so we can use the
- * SurfaceCopy command.
- */
- if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
- bo && only_2d &&
- mode_cmd->width > 64 && /* Don't create a proxy for cursor */
- dev_priv->active_display_unit == vmw_du_screen_target) {
- ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
- bo, &surface);
- if (ret)
- return ERR_PTR(ret);
-
- is_bo_proxy = true;
- }
-
/* Create the new framebuffer depending one what we have */
- if (surface) {
- ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
- mode_cmd,
- is_bo_proxy);
- /*
- * vmw_create_bo_proxy() adds a reference that is no longer
- * needed
- */
- if (is_bo_proxy)
- vmw_surface_unreference(&surface);
- } else if (bo) {
- ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
+ if (vmw_user_object_surface(uo)) {
+ ret = vmw_kms_new_framebuffer_surface(dev_priv, uo, &vfb,
+ mode_cmd);
+ } else if (uo->buffer) {
+ ret = vmw_kms_new_framebuffer_bo(dev_priv, uo->buffer, &vfb,
mode_cmd);
} else {
BUG();
@@ -1635,14 +1521,12 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_framebuffer *vfb = NULL;
- struct vmw_surface *surface = NULL;
- struct vmw_bo *bo = NULL;
+ struct vmw_user_object uo = {0};
int ret;
/* returns either a bo or surface */
- ret = vmw_user_lookup_handle(dev_priv, file_priv,
- mode_cmd->handles[0],
- &surface, &bo);
+ ret = vmw_user_object_lookup(dev_priv, file_priv, mode_cmd->handles[0],
+ &uo);
if (ret) {
DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
mode_cmd->handles[0], mode_cmd->handles[0]);
@@ -1650,7 +1534,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
}
- if (!bo &&
+ if (vmw_user_object_surface(&uo) &&
!vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
DRM_ERROR("Surface size cannot exceed %dx%d\n",
dev_priv->texture_max_width,
@@ -1659,20 +1543,15 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
}
- vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
- !(dev_priv->capabilities & SVGA_CAP_3D),
- mode_cmd);
+ vfb = vmw_kms_new_framebuffer(dev_priv, &uo, mode_cmd);
if (IS_ERR(vfb)) {
ret = PTR_ERR(vfb);
goto err_out;
}
err_out:
- /* vmw_user_lookup_handle takes one ref so does new_fb */
- if (bo)
- vmw_user_bo_unref(&bo);
- if (surface)
- vmw_surface_unreference(&surface);
+ /* vmw_user_object_lookup takes one ref so does new_fb */
+ vmw_user_object_unref(&uo);
if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
@@ -2585,72 +2464,6 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
}
/**
- * vmw_kms_update_proxy - Helper function to update a proxy surface from
- * its backing MOB.
- *
- * @res: Pointer to the surface resource
- * @clips: Clip rects in framebuffer (surface) space.
- * @num_clips: Number of clips in @clips.
- * @increment: Integer with which to increment the clip counter when looping.
- * Used to skip a predetermined number of clip rects.
- *
- * This function makes sure the proxy surface is updated from its backing MOB
- * using the region given by @clips. The surface resource @res and its backing
- * MOB needs to be reserved and validated on call.
- */
-int vmw_kms_update_proxy(struct vmw_resource *res,
- const struct drm_clip_rect *clips,
- unsigned num_clips,
- int increment)
-{
- struct vmw_private *dev_priv = res->dev_priv;
- struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdUpdateGBImage body;
- } *cmd;
- SVGA3dBox *box;
- size_t copy_size = 0;
- int i;
-
- if (!clips)
- return 0;
-
- cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
- if (!cmd)
- return -ENOMEM;
-
- for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
- box = &cmd->body.box;
-
- cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.image.sid = res->id;
- cmd->body.image.face = 0;
- cmd->body.image.mipmap = 0;
-
- if (clips->x1 > size->width || clips->x2 > size->width ||
- clips->y1 > size->height || clips->y2 > size->height) {
- DRM_ERROR("Invalid clips outsize of framebuffer.\n");
- return -EINVAL;
- }
-
- box->x = clips->x1;
- box->y = clips->y1;
- box->z = 0;
- box->w = clips->x2 - clips->x1;
- box->h = clips->y2 - clips->y1;
- box->d = 1;
-
- copy_size += sizeof(*cmd);
- }
-
- vmw_cmd_commit(dev_priv, copy_size);
-
- return 0;
-}
-
-/**
* vmw_kms_create_implicit_placement_property - Set up the implicit placement
* property.
*
@@ -2784,8 +2597,9 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
} else {
struct vmw_framebuffer_surface *vfbs =
container_of(update->vfb, typeof(*vfbs), base);
+ struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo);
- ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
+ ret = vmw_validation_add_resource(&val_ctx, &surf->res,
0, VMW_RES_DIRTY_NONE, NULL,
NULL);
}
@@ -2941,3 +2755,93 @@ int vmw_connector_get_modes(struct drm_connector *connector)
return num_modes;
}
+
+struct vmw_user_object *vmw_user_object_ref(struct vmw_user_object *uo)
+{
+ if (uo->buffer)
+ vmw_user_bo_ref(uo->buffer);
+ else if (uo->surface)
+ vmw_surface_reference(uo->surface);
+ return uo;
+}
+
+void vmw_user_object_unref(struct vmw_user_object *uo)
+{
+ if (uo->buffer)
+ vmw_user_bo_unref(&uo->buffer);
+ else if (uo->surface)
+ vmw_surface_unreference(&uo->surface);
+}
+
+struct vmw_bo *
+vmw_user_object_buffer(struct vmw_user_object *uo)
+{
+ if (uo->buffer)
+ return uo->buffer;
+ else if (uo->surface)
+ return uo->surface->res.guest_memory_bo;
+ return NULL;
+}
+
+struct vmw_surface *
+vmw_user_object_surface(struct vmw_user_object *uo)
+{
+ if (uo->buffer)
+ return uo->buffer->dumb_surface;
+ return uo->surface;
+}
+
+void *vmw_user_object_map(struct vmw_user_object *uo)
+{
+ struct vmw_bo *bo = vmw_user_object_buffer(uo);
+
+ WARN_ON(!bo);
+ return vmw_bo_map_and_cache(bo);
+}
+
+void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size)
+{
+ struct vmw_bo *bo = vmw_user_object_buffer(uo);
+
+ WARN_ON(!bo);
+ return vmw_bo_map_and_cache_size(bo, size);
+}
+
+void vmw_user_object_unmap(struct vmw_user_object *uo)
+{
+ struct vmw_bo *bo = vmw_user_object_buffer(uo);
+ int ret;
+
+ WARN_ON(!bo);
+
+ /* Fence the mob creation so we are guaranteed to have the mob */
+ ret = ttm_bo_reserve(&bo->tbo, false, false, NULL);
+ if (ret != 0)
+ return;
+
+ vmw_bo_unmap(bo);
+ vmw_bo_pin_reserved(bo, false);
+
+ ttm_bo_unreserve(&bo->tbo);
+}
+
+bool vmw_user_object_is_mapped(struct vmw_user_object *uo)
+{
+ struct vmw_bo *bo;
+
+ if (!uo || vmw_user_object_is_null(uo))
+ return false;
+
+ bo = vmw_user_object_buffer(uo);
+
+ if (WARN_ON(!bo))
+ return false;
+
+ WARN_ON(bo->map.bo && !bo->map.virtual);
+ return bo->map.virtual;
+}
+
+bool vmw_user_object_is_null(struct vmw_user_object *uo)
+{
+ return !uo->buffer && !uo->surface;
+}
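
A minimal caller-side sketch (not part of the diff above) of how the new
vmw_user_object helpers pair a lookup with an unref; the example_ function
name and error handling are illustrative only, the helper signatures are
taken from the hunks above.

static int example_handle_to_object(struct vmw_private *dev_priv,
				    struct drm_file *filp, u32 handle)
{
	struct vmw_user_object uo = {0};
	struct vmw_surface *surf;
	struct vmw_bo *bo;
	int ret;

	/* Resolves to either a surface or a buffer object, taking one ref. */
	ret = vmw_user_object_lookup(dev_priv, filp, handle, &uo);
	if (ret)
		return ret;

	surf = vmw_user_object_surface(&uo);	/* NULL for plain buffers */
	bo = vmw_user_object_buffer(&uo);	/* backing BO, if any */

	/* ... use surf/bo ... */

	/* Drops whichever reference the lookup took. */
	vmw_user_object_unref(&uo);
	return 0;
}
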
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index bf24f2f0dcfc..6141fadf81ef 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -221,11 +222,9 @@ struct vmw_framebuffer {
struct vmw_framebuffer_surface {
struct vmw_framebuffer base;
- struct vmw_surface *surface;
- bool is_bo_proxy; /* true if this is proxy surface for DMA buf */
+ struct vmw_user_object uo;
};
-
struct vmw_framebuffer_bo {
struct vmw_framebuffer base;
struct vmw_bo *buffer;
@@ -277,8 +276,7 @@ struct vmw_cursor_plane_state {
*/
struct vmw_plane_state {
struct drm_plane_state base;
- struct vmw_surface *surf;
- struct vmw_bo *bo;
+ struct vmw_user_object uo;
int content_fb_type;
unsigned long bo_size;
@@ -457,9 +455,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
uint32_t num_clips);
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
- struct vmw_bo *bo,
- struct vmw_surface *surface,
- bool only_2d,
+ struct vmw_user_object *uo,
const struct drm_mode_fb_cmd2 *mode_cmd);
void vmw_guess_mode_timing(struct drm_display_mode *mode);
void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv);
@@ -486,8 +482,7 @@ void vmw_du_plane_reset(struct drm_plane *plane);
struct drm_plane_state *vmw_du_plane_duplicate_state(struct drm_plane *plane);
void vmw_du_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
-void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
- bool unreference);
+void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps);
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 5befc2719a49..39949e0a493f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -147,8 +148,9 @@ static int vmw_ldu_fb_pin(struct vmw_framebuffer *vfb)
struct vmw_bo *buf;
int ret;
- buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
- vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.guest_memory_bo;
+ buf = vfb->bo ?
+ vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+ vmw_user_object_buffer(&vmw_framebuffer_to_vfbs(&vfb->base)->uo);
if (!buf)
return 0;
@@ -169,8 +171,10 @@ static int vmw_ldu_fb_unpin(struct vmw_framebuffer *vfb)
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
struct vmw_bo *buf;
- buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
- vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.guest_memory_bo;
+ buf = vfb->bo ?
+ vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+ vmw_user_object_buffer(&vmw_framebuffer_to_vfbs(&vfb->base)->uo);
+
if (WARN_ON(!buf))
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index c45b4724e414..e20f64b67b26 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -92,7 +92,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
{
struct vmw_escape_video_flush *flush;
size_t fifo_size;
- bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
+ bool have_so = (dev_priv->active_display_unit != vmw_du_legacy);
int i, num_items;
SVGAGuestPtr ptr;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
index c99cad444991..598b90ac7590 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2013 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2013-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -31,6 +32,7 @@
*/
#include "vmwgfx_drv.h"
+#include "vmwgfx_bo.h"
#include "ttm_object.h"
#include <linux/dma-buf.h>
@@ -88,13 +90,35 @@ int vmw_prime_handle_to_fd(struct drm_device *dev,
uint32_t handle, uint32_t flags,
int *prime_fd)
{
+ struct vmw_private *vmw = vmw_priv(dev);
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_bo *vbo;
int ret;
+ int surf_handle;
- if (handle > VMWGFX_NUM_MOB)
+ if (handle > VMWGFX_NUM_MOB) {
ret = ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
- else
- ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, prime_fd);
+ } else {
+ ret = vmw_user_bo_lookup(file_priv, handle, &vbo);
+ if (ret)
+ return ret;
+ if (vbo && vbo->is_dumb) {
+ ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle,
+ flags, prime_fd);
+ } else {
+ surf_handle = vmw_lookup_surface_handle_for_buffer(vmw,
+ vbo,
+ handle);
+ if (surf_handle > 0)
+ ret = ttm_prime_handle_to_fd(tfile, surf_handle,
+ flags, prime_fd);
+ else
+ ret = drm_gem_prime_handle_to_fd(dev, file_priv,
+ handle, flags,
+ prime_fd);
+ }
+ vmw_user_bo_unref(&vbo);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 848dba09981b..a73af8a355fb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -58,6 +59,7 @@ void vmw_resource_mob_attach(struct vmw_resource *res)
rb_link_node(&res->mob_node, parent, new);
rb_insert_color(&res->mob_node, &gbo->res_tree);
+ vmw_bo_del_detached_resource(gbo, res);
vmw_bo_prio_add(gbo, res->used_prio);
}
@@ -287,28 +289,35 @@ out_bad_resource:
*
* The pointer this pointed at by out_surf and out_buf needs to be null.
*/
-int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+int vmw_user_object_lookup(struct vmw_private *dev_priv,
struct drm_file *filp,
- uint32_t handle,
- struct vmw_surface **out_surf,
- struct vmw_bo **out_buf)
+ u32 handle,
+ struct vmw_user_object *uo)
{
struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
struct vmw_resource *res;
int ret;
- BUG_ON(*out_surf || *out_buf);
+ WARN_ON(uo->surface || uo->buffer);
ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
user_surface_converter,
&res);
if (!ret) {
- *out_surf = vmw_res_to_srf(res);
+ uo->surface = vmw_res_to_srf(res);
return 0;
}
- *out_surf = NULL;
- ret = vmw_user_bo_lookup(filp, handle, out_buf);
+ uo->surface = NULL;
+ ret = vmw_user_bo_lookup(filp, handle, &uo->buffer);
+ if (!ret && !uo->buffer->is_dumb) {
+ uo->surface = vmw_lookup_surface_for_buffer(dev_priv,
+ uo->buffer,
+ handle);
+ if (uo->surface)
+ vmw_user_bo_unref(&uo->buffer);
+ }
+
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index df0039a8ef29..0f4bfd98480a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2011-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -240,7 +241,7 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
struct vmw_connector_state *vmw_conn_state;
int x, y;
- sou->buffer = vps->bo;
+ sou->buffer = vmw_user_object_buffer(&vps->uo);
conn_state = sou->base.connector.state;
vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
@@ -376,10 +377,11 @@ vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
struct drm_crtc *crtc = plane->state->crtc ?
plane->state->crtc : old_state->crtc;
+ struct vmw_bo *bo = vmw_user_object_buffer(&vps->uo);
- if (vps->bo)
- vmw_bo_unpin(vmw_priv(crtc->dev), vps->bo, false);
- vmw_bo_unreference(&vps->bo);
+ if (bo)
+ vmw_bo_unpin(vmw_priv(crtc->dev), bo, false);
+ vmw_user_object_unref(&vps->uo);
vps->bo_size = 0;
vmw_du_plane_cleanup_fb(plane, old_state);
@@ -411,9 +413,10 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
.bo_type = ttm_bo_type_device,
.pin = true
};
+ struct vmw_bo *bo = NULL;
if (!new_fb) {
- vmw_bo_unreference(&vps->bo);
+ vmw_user_object_unref(&vps->uo);
vps->bo_size = 0;
return 0;
@@ -422,17 +425,17 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
bo_params.size = new_state->crtc_w * new_state->crtc_h * 4;
dev_priv = vmw_priv(crtc->dev);
- if (vps->bo) {
+ bo = vmw_user_object_buffer(&vps->uo);
+ if (bo) {
if (vps->bo_size == bo_params.size) {
/*
* Note that this might temporarily up the pin-count
* to 2, until cleanup_fb() is called.
*/
- return vmw_bo_pin_in_vram(dev_priv, vps->bo,
- true);
+ return vmw_bo_pin_in_vram(dev_priv, bo, true);
}
- vmw_bo_unreference(&vps->bo);
+ vmw_user_object_unref(&vps->uo);
vps->bo_size = 0;
}
@@ -442,7 +445,7 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
* resume the overlays, this is preferred to failing to alloc.
*/
vmw_overlay_pause_all(dev_priv);
- ret = vmw_bo_create(dev_priv, &bo_params, &vps->bo);
+ ret = vmw_gem_object_create(dev_priv, &bo_params, &vps->uo.buffer);
vmw_overlay_resume_all(dev_priv);
if (ret)
return ret;
@@ -453,7 +456,7 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
* TTM already thinks the buffer is pinned, but make sure the
* pin_count is upped.
*/
- return vmw_bo_pin_in_vram(dev_priv, vps->bo, true);
+ return vmw_bo_pin_in_vram(dev_priv, vps->uo.buffer, true);
}
static uint32_t vmw_sou_bo_fifo_size(struct vmw_du_update_plane *update,
@@ -580,6 +583,7 @@ static uint32_t vmw_sou_surface_pre_clip(struct vmw_du_update_plane *update,
{
struct vmw_kms_sou_dirty_cmd *blit = cmd;
struct vmw_framebuffer_surface *vfbs;
+ struct vmw_surface *surf = NULL;
vfbs = container_of(update->vfb, typeof(*vfbs), base);
@@ -587,7 +591,8 @@ static uint32_t vmw_sou_surface_pre_clip(struct vmw_du_update_plane *update,
blit->header.size = sizeof(blit->body) + sizeof(SVGASignedRect) *
num_hits;
- blit->body.srcImage.sid = vfbs->surface->res.id;
+ surf = vmw_user_object_surface(&vfbs->uo);
+ blit->body.srcImage.sid = surf->res.id;
blit->body.destScreenId = update->du->unit;
/* Update the source and destination bounding box later in post_clip */
@@ -1104,7 +1109,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
int ret;
if (!srf)
- srf = &vfbs->surface->res;
+ srf = &vmw_user_object_surface(&vfbs->uo)->res;
ret = vmw_validation_add_resource(&val_ctx, srf, 0, VMW_RES_DIRTY_NONE,
NULL, NULL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index a04e0736318d..5453f7cf0e2d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/******************************************************************************
*
- * COPYRIGHT (C) 2014-2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2014-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -29,6 +30,7 @@
#include "vmwgfx_kms.h"
#include "vmwgfx_vkms.h"
#include "vmw_surface_cache.h"
+#include <linux/fsnotify.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -735,7 +737,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
int ret;
if (!srf)
- srf = &vfbs->surface->res;
+ srf = &vmw_user_object_surface(&vfbs->uo)->res;
ret = vmw_validation_add_resource(&val_ctx, srf, 0, VMW_RES_DIRTY_NONE,
NULL, NULL);
@@ -746,12 +748,6 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
if (ret)
goto out_unref;
- if (vfbs->is_bo_proxy) {
- ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
- if (ret)
- goto out_finish;
- }
-
sdirty.base.fifo_commit = vmw_kms_stdu_surface_fifo_commit;
sdirty.base.clip = vmw_kms_stdu_surface_clip;
sdirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_surface_copy) +
@@ -765,7 +761,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
dest_x, dest_y, num_clips, inc,
&sdirty.base);
-out_finish:
+
vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
NULL);
@@ -877,6 +873,32 @@ vmw_stdu_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
+/*
+ * Trigger a modeset if the X,Y position of the Screen Target changes.
+ * This is needed when multi-mon is cycled. The original Screen Target will have
+ * the same mode but its relative X,Y position in the topology will change.
+ */
+static int vmw_stdu_connector_atomic_check(struct drm_connector *conn,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *conn_state;
+ struct vmw_screen_target_display_unit *du;
+ struct drm_crtc_state *new_crtc_state;
+
+ conn_state = drm_atomic_get_connector_state(state, conn);
+ du = vmw_connector_to_stdu(conn);
+
+ if (!conn_state->crtc)
+ return 0;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (du->base.gui_x != du->base.set_gui_x ||
+ du->base.gui_y != du->base.set_gui_y)
+ new_crtc_state->mode_changed = true;
+
+ return 0;
+}
+
static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.detect = vmw_du_connector_detect,
@@ -891,7 +913,8 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
static const struct
drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = {
.get_modes = vmw_connector_get_modes,
- .mode_valid = vmw_stdu_connector_mode_valid
+ .mode_valid = vmw_stdu_connector_mode_valid,
+ .atomic_check = vmw_stdu_connector_atomic_check,
};
@@ -918,9 +941,8 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
{
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
- if (vps->surf)
+ if (vmw_user_object_surface(&vps->uo))
WARN_ON(!vps->pinned);
-
vmw_du_plane_cleanup_fb(plane, old_state);
vps->content_fb_type = SAME_AS_DISPLAY;
@@ -928,7 +950,6 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
}
-
/**
* vmw_stdu_primary_plane_prepare_fb - Readies the display surface
*
@@ -952,13 +973,15 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
enum stdu_content_type new_content_type;
struct vmw_framebuffer_surface *new_vfbs;
uint32_t hdisplay = new_state->crtc_w, vdisplay = new_state->crtc_h;
+ struct drm_plane_state *old_state = plane->state;
+ struct drm_rect rect;
int ret;
/* No FB to prepare */
if (!new_fb) {
- if (vps->surf) {
+ if (vmw_user_object_surface(&vps->uo)) {
WARN_ON(vps->pinned != 0);
- vmw_surface_unreference(&vps->surf);
+ vmw_user_object_unref(&vps->uo);
}
return 0;
@@ -968,8 +991,8 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
new_vfbs = (vfb->bo) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
if (new_vfbs &&
- new_vfbs->surface->metadata.base_size.width == hdisplay &&
- new_vfbs->surface->metadata.base_size.height == vdisplay)
+ vmw_user_object_surface(&new_vfbs->uo)->metadata.base_size.width == hdisplay &&
+ vmw_user_object_surface(&new_vfbs->uo)->metadata.base_size.height == vdisplay)
new_content_type = SAME_AS_DISPLAY;
else if (vfb->bo)
new_content_type = SEPARATE_BO;
@@ -1007,29 +1030,29 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
metadata.num_sizes = 1;
metadata.scanout = true;
} else {
- metadata = new_vfbs->surface->metadata;
+ metadata = vmw_user_object_surface(&new_vfbs->uo)->metadata;
}
metadata.base_size.width = hdisplay;
metadata.base_size.height = vdisplay;
metadata.base_size.depth = 1;
- if (vps->surf) {
+ if (vmw_user_object_surface(&vps->uo)) {
struct drm_vmw_size cur_base_size =
- vps->surf->metadata.base_size;
+ vmw_user_object_surface(&vps->uo)->metadata.base_size;
if (cur_base_size.width != metadata.base_size.width ||
cur_base_size.height != metadata.base_size.height ||
- vps->surf->metadata.format != metadata.format) {
+ vmw_user_object_surface(&vps->uo)->metadata.format != metadata.format) {
WARN_ON(vps->pinned != 0);
- vmw_surface_unreference(&vps->surf);
+ vmw_user_object_unref(&vps->uo);
}
}
- if (!vps->surf) {
+ if (!vmw_user_object_surface(&vps->uo)) {
ret = vmw_gb_surface_define(dev_priv, &metadata,
- &vps->surf);
+ &vps->uo.surface);
if (ret != 0) {
DRM_ERROR("Couldn't allocate STDU surface.\n");
return ret;
@@ -1042,18 +1065,19 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
* The only time we add a reference in prepare_fb is if the
* state object doesn't have a reference to begin with
*/
- if (vps->surf) {
+ if (vmw_user_object_surface(&vps->uo)) {
WARN_ON(vps->pinned != 0);
- vmw_surface_unreference(&vps->surf);
+ vmw_user_object_unref(&vps->uo);
}
- vps->surf = vmw_surface_reference(new_vfbs->surface);
+ memcpy(&vps->uo, &new_vfbs->uo, sizeof(vps->uo));
+ vmw_user_object_ref(&vps->uo);
}
- if (vps->surf) {
+ if (vmw_user_object_surface(&vps->uo)) {
/* Pin new surface before flipping */
- ret = vmw_resource_pin(&vps->surf->res, false);
+ ret = vmw_resource_pin(&vmw_user_object_surface(&vps->uo)->res, false);
if (ret)
goto out_srf_unref;
@@ -1063,6 +1087,34 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
vps->content_fb_type = new_content_type;
/*
+ * The drm fb code will do blits via the vmap interface, which doesn't
+ * trigger vmw_bo page dirty tracking due to being kernel side (and thus
+ * doesn't require mmap'ing) so we have to update the surface's dirty
+ * regions by hand but we want to be careful to not overwrite the
+ * resource if it has been written to by the gpu (res_dirty).
+ */
+ if (vps->uo.buffer && vps->uo.buffer->is_dumb) {
+ struct vmw_surface *surf = vmw_user_object_surface(&vps->uo);
+ struct vmw_resource *res = &surf->res;
+
+ if (!res->res_dirty && drm_atomic_helper_damage_merged(old_state,
+ new_state,
+ &rect)) {
+ /*
+ * At some point it might be useful to actually translate
+ * (rect.x1, rect.y1) => start, and (rect.x2, rect.y2) => end,
+ * but currently the fb code will just report the entire fb
+ * dirty so in practice it doesn't matter.
+ */
+ pgoff_t start = res->guest_memory_offset >> PAGE_SHIFT;
+ pgoff_t end = __KERNEL_DIV_ROUND_UP(res->guest_memory_offset +
+ res->guest_memory_size,
+ PAGE_SIZE);
+ vmw_resource_dirty_update(res, start, end);
+ }
+ }
+
+ /*
* This should only happen if the buffer object is too large to create a
* proxy surface for.
*/
@@ -1072,7 +1124,7 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
return 0;
out_srf_unref:
- vmw_surface_unreference(&vps->surf);
+ vmw_user_object_unref(&vps->uo);
return ret;
}
@@ -1214,14 +1266,8 @@ static uint32_t
vmw_stdu_surface_fifo_size_same_display(struct vmw_du_update_plane *update,
uint32_t num_hits)
{
- struct vmw_framebuffer_surface *vfbs;
uint32_t size = 0;
- vfbs = container_of(update->vfb, typeof(*vfbs), base);
-
- if (vfbs->is_bo_proxy)
- size += sizeof(struct vmw_stdu_update_gb_image) * num_hits;
-
size += sizeof(struct vmw_stdu_update);
return size;
@@ -1230,14 +1276,8 @@ vmw_stdu_surface_fifo_size_same_display(struct vmw_du_update_plane *update,
static uint32_t vmw_stdu_surface_fifo_size(struct vmw_du_update_plane *update,
uint32_t num_hits)
{
- struct vmw_framebuffer_surface *vfbs;
uint32_t size = 0;
- vfbs = container_of(update->vfb, typeof(*vfbs), base);
-
- if (vfbs->is_bo_proxy)
- size += sizeof(struct vmw_stdu_update_gb_image) * num_hits;
-
size += sizeof(struct vmw_stdu_surface_copy) + sizeof(SVGA3dCopyBox) *
num_hits + sizeof(struct vmw_stdu_update);
@@ -1245,47 +1285,6 @@ static uint32_t vmw_stdu_surface_fifo_size(struct vmw_du_update_plane *update,
}
static uint32_t
-vmw_stdu_surface_update_proxy(struct vmw_du_update_plane *update, void *cmd)
-{
- struct vmw_framebuffer_surface *vfbs;
- struct drm_plane_state *state = update->plane->state;
- struct drm_plane_state *old_state = update->old_state;
- struct vmw_stdu_update_gb_image *cmd_update = cmd;
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect clip;
- uint32_t copy_size = 0;
-
- vfbs = container_of(update->vfb, typeof(*vfbs), base);
-
- /*
- * proxy surface is special where a buffer object type fb is wrapped
- * in a surface and need an update gb image command to sync with device.
- */
- drm_atomic_helper_damage_iter_init(&iter, old_state, state);
- drm_atomic_for_each_plane_damage(&iter, &clip) {
- SVGA3dBox *box = &cmd_update->body.box;
-
- cmd_update->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
- cmd_update->header.size = sizeof(cmd_update->body);
- cmd_update->body.image.sid = vfbs->surface->res.id;
- cmd_update->body.image.face = 0;
- cmd_update->body.image.mipmap = 0;
-
- box->x = clip.x1;
- box->y = clip.y1;
- box->z = 0;
- box->w = drm_rect_width(&clip);
- box->h = drm_rect_height(&clip);
- box->d = 1;
-
- copy_size += sizeof(*cmd_update);
- cmd_update++;
- }
-
- return copy_size;
-}
-
-static uint32_t
vmw_stdu_surface_populate_copy(struct vmw_du_update_plane *update, void *cmd,
uint32_t num_hits)
{
@@ -1299,7 +1298,7 @@ vmw_stdu_surface_populate_copy(struct vmw_du_update_plane *update, void *cmd,
cmd_copy->header.id = SVGA_3D_CMD_SURFACE_COPY;
cmd_copy->header.size = sizeof(cmd_copy->body) + sizeof(SVGA3dCopyBox) *
num_hits;
- cmd_copy->body.src.sid = vfbs->surface->res.id;
+ cmd_copy->body.src.sid = vmw_user_object_surface(&vfbs->uo)->res.id;
cmd_copy->body.dest.sid = stdu->display_srf->res.id;
return sizeof(*cmd_copy);
@@ -1370,10 +1369,7 @@ static int vmw_stdu_plane_update_surface(struct vmw_private *dev_priv,
srf_update.mutex = &dev_priv->cmdbuf_mutex;
srf_update.intr = true;
- if (vfbs->is_bo_proxy)
- srf_update.post_prepare = vmw_stdu_surface_update_proxy;
-
- if (vfbs->surface->res.id != stdu->display_srf->res.id) {
+ if (vmw_user_object_surface(&vfbs->uo)->res.id != stdu->display_srf->res.id) {
srf_update.calc_fifo_size = vmw_stdu_surface_fifo_size;
srf_update.pre_clip = vmw_stdu_surface_populate_copy;
srf_update.clip = vmw_stdu_surface_populate_clip;
@@ -1417,7 +1413,7 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev);
- stdu->display_srf = vps->surf;
+ stdu->display_srf = vmw_user_object_surface(&vps->uo);
stdu->content_fb_type = vps->content_fb_type;
stdu->cpp = vps->cpp;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index e7a744dfcecf..8ae6a761c900 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -36,9 +37,6 @@
#include <drm/ttm/ttm_placement.h>
#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
-#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
-#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
- (svga3d_flags & ((uint64_t)U32_MAX))
/**
* struct vmw_user_surface - User-space visible surface resource
@@ -686,6 +684,14 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
struct vmw_resource *res = &user_srf->srf.res;
*p_base = NULL;
+
+ /*
+ * Dumb buffers own the resource and they'll unref the
+ * resource themselves
+ */
+ if (res && res->guest_memory_bo && res->guest_memory_bo->is_dumb)
+ return;
+
vmw_resource_unreference(&res);
}
@@ -812,7 +818,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
}
res->guest_memory_size = cur_bo_offset;
- if (metadata->scanout &&
+ if (!file_priv->atomic &&
+ metadata->scanout &&
metadata->num_sizes == 1 &&
metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT &&
@@ -864,6 +871,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
vmw_resource_unreference(&res);
goto out_unlock;
}
+ vmw_bo_add_detached_resource(res->guest_memory_bo, res);
}
tmp = vmw_resource_reference(&srf->res);
@@ -892,6 +900,113 @@ out_unlock:
return ret;
}
+static struct vmw_user_surface *
+vmw_lookup_user_surface_for_buffer(struct vmw_private *vmw, struct vmw_bo *bo,
+ u32 handle)
+{
+ struct vmw_user_surface *user_srf = NULL;
+ struct vmw_surface *surf;
+ struct ttm_base_object *base;
+
+ surf = vmw_bo_surface(bo);
+ if (surf) {
+ rcu_read_lock();
+ user_srf = container_of(surf, struct vmw_user_surface, srf);
+ base = &user_srf->prime.base;
+ if (base && !kref_get_unless_zero(&base->refcount)) {
+ drm_dbg_driver(&vmw->drm,
+ "%s: referencing a stale surface handle %d\n",
+ __func__, handle);
+ base = NULL;
+ user_srf = NULL;
+ }
+ rcu_read_unlock();
+ }
+
+ return user_srf;
+}
+
+struct vmw_surface *vmw_lookup_surface_for_buffer(struct vmw_private *vmw,
+ struct vmw_bo *bo,
+ u32 handle)
+{
+ struct vmw_user_surface *user_srf =
+ vmw_lookup_user_surface_for_buffer(vmw, bo, handle);
+ struct vmw_surface *surf = NULL;
+ struct ttm_base_object *base;
+
+ if (user_srf) {
+ surf = vmw_surface_reference(&user_srf->srf);
+ base = &user_srf->prime.base;
+ ttm_base_object_unref(&base);
+ }
+ return surf;
+}
+
+u32 vmw_lookup_surface_handle_for_buffer(struct vmw_private *vmw,
+ struct vmw_bo *bo,
+ u32 handle)
+{
+ struct vmw_user_surface *user_srf =
+ vmw_lookup_user_surface_for_buffer(vmw, bo, handle);
+ int surf_handle = 0;
+ struct ttm_base_object *base;
+
+ if (user_srf) {
+ base = &user_srf->prime.base;
+ surf_handle = (u32)base->handle;
+ ttm_base_object_unref(&base);
+ }
+ return surf_handle;
+}
+
+static int vmw_buffer_prime_to_surface_base(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ u32 fd, u32 *handle,
+ struct ttm_base_object **base_p)
+{
+ struct ttm_base_object *base;
+ struct vmw_bo *bo;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_user_surface *user_srf;
+ int ret;
+
+ ret = drm_gem_prime_fd_to_handle(&dev_priv->drm, file_priv, fd, handle);
+ if (ret) {
+ drm_warn(&dev_priv->drm,
+ "Wasn't able to find user buffer for fd = %u.\n", fd);
+ return ret;
+ }
+
+ ret = vmw_user_bo_lookup(file_priv, *handle, &bo);
+ if (ret) {
+ drm_warn(&dev_priv->drm,
+ "Wasn't able to lookup user buffer for handle = %u.\n", *handle);
+ return ret;
+ }
+
+ user_srf = vmw_lookup_user_surface_for_buffer(dev_priv, bo, *handle);
+ if (WARN_ON(!user_srf)) {
+ drm_warn(&dev_priv->drm,
+ "User surface fd %d (handle %d) is null.\n", fd, *handle);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ base = &user_srf->prime.base;
+ ret = ttm_ref_object_add(tfile, base, NULL, false);
+ if (ret) {
+ drm_warn(&dev_priv->drm,
+ "Couldn't add an object ref for the buffer (%d).\n", *handle);
+ goto out;
+ }
+
+ *base_p = base;
+out:
+ vmw_user_bo_unref(&bo);
+
+ return ret;
+}
static int
vmw_surface_handle_reference(struct vmw_private *dev_priv,
@@ -901,15 +1016,19 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
struct ttm_base_object **base_p)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_user_surface *user_srf;
+ struct vmw_user_surface *user_srf = NULL;
uint32_t handle;
struct ttm_base_object *base;
int ret;
if (handle_type == DRM_VMW_HANDLE_PRIME) {
ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
- if (unlikely(ret != 0))
- return ret;
+ if (ret)
+ return vmw_buffer_prime_to_surface_base(dev_priv,
+ file_priv,
+ u_handle,
+ &handle,
+ base_p);
} else {
handle = u_handle;
}
@@ -1503,7 +1622,12 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle,
&res->guest_memory_bo);
if (ret == 0) {
- if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) {
+ if (res->guest_memory_bo->is_dumb) {
+ VMW_DEBUG_USER("Can't backup surface with a dumb buffer.\n");
+ vmw_user_bo_unref(&res->guest_memory_bo);
+ ret = -EINVAL;
+ goto out_unlock;
+ } else if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) {
VMW_DEBUG_USER("Surface backup buffer too small.\n");
vmw_user_bo_unref(&res->guest_memory_bo);
ret = -EINVAL;
@@ -1560,6 +1684,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
rep->handle = user_srf->prime.base.handle;
rep->backup_size = res->guest_memory_size;
if (res->guest_memory_bo) {
+ vmw_bo_add_detached_resource(res->guest_memory_bo, res);
rep->buffer_map_handle =
drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node);
rep->buffer_size = res->guest_memory_bo->tbo.base.size;
@@ -2100,3 +2225,140 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv,
out_unlock:
return ret;
}
+
+static SVGA3dSurfaceFormat vmw_format_bpp_to_svga(struct vmw_private *vmw,
+ int bpp)
+{
+ switch (bpp) {
+ case 8: /* DRM_FORMAT_C8 */
+ return SVGA3D_P8;
+ case 16: /* DRM_FORMAT_RGB565 */
+ return SVGA3D_R5G6B5;
+ case 32: /* DRM_FORMAT_XRGB8888 */
+ if (has_sm4_context(vmw))
+ return SVGA3D_B8G8R8X8_UNORM;
+ return SVGA3D_X8R8G8B8;
+ default:
+ drm_warn(&vmw->drm, "Unsupported format bpp: %d\n", bpp);
+ return SVGA3D_X8R8G8B8;
+ }
+}
+
+/**
+ * vmw_dumb_create - Create a dumb kms buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @args: Pointer to a struct drm_mode_create_dumb structure
+ * Return: Zero on success, negative error code on failure.
+ *
+ * This is a driver callback for the core drm create_dumb functionality.
+ * Note that this is very similar to the vmw_bo_alloc ioctl, except
+ * that the arguments have a different format.
+ */
+int vmw_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_bo *vbo = NULL;
+ struct vmw_resource *res = NULL;
+ union drm_vmw_gb_surface_create_ext_arg arg = { 0 };
+ struct drm_vmw_gb_surface_create_ext_req *req = &arg.req;
+ int ret;
+ struct drm_vmw_size drm_size = {
+ .width = args->width,
+ .height = args->height,
+ .depth = 1,
+ };
+ SVGA3dSurfaceFormat format = vmw_format_bpp_to_svga(dev_priv, args->bpp);
+ const struct SVGA3dSurfaceDesc *desc = vmw_surface_get_desc(format);
+ SVGA3dSurfaceAllFlags flags = SVGA3D_SURFACE_HINT_TEXTURE |
+ SVGA3D_SURFACE_HINT_RENDERTARGET |
+ SVGA3D_SURFACE_SCREENTARGET |
+ SVGA3D_SURFACE_BIND_SHADER_RESOURCE |
+ SVGA3D_SURFACE_BIND_RENDER_TARGET;
+
+ /*
+ * Without mob support we're just going to use a raw memory buffer
+ * because we wouldn't be able to support full surface coherency
+ * without mobs.
+ */
+ if (!dev_priv->has_mob) {
+ int cpp = DIV_ROUND_UP(args->bpp, 8);
+
+ switch (cpp) {
+ case 1: /* DRM_FORMAT_C8 */
+ case 2: /* DRM_FORMAT_RGB565 */
+ case 4: /* DRM_FORMAT_XRGB8888 */
+ break;
+ default:
+ /*
+ * Dumb buffers don't allow anything else.
+ * This is tested via IGT's dumb_buffers
+ */
+ return -EINVAL;
+ }
+
+ args->pitch = args->width * cpp;
+ args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);
+
+ ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
+ args->size, &args->handle,
+ &vbo);
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_put(&vbo->tbo.base);
+ return ret;
+ }
+
+ req->version = drm_vmw_gb_surface_v1;
+ req->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
+ req->quality_level = SVGA3D_MS_QUALITY_NONE;
+ req->buffer_byte_stride = 0;
+ req->must_be_zero = 0;
+ req->base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(flags);
+ req->svga3d_flags_upper_32_bits = SVGA3D_FLAGS_UPPER_32(flags);
+ req->base.format = (uint32_t)format;
+ req->base.drm_surface_flags = drm_vmw_surface_flag_scanout;
+ req->base.drm_surface_flags |= drm_vmw_surface_flag_shareable;
+ req->base.drm_surface_flags |= drm_vmw_surface_flag_create_buffer;
+ req->base.drm_surface_flags |= drm_vmw_surface_flag_coherent;
+ req->base.base_size.width = args->width;
+ req->base.base_size.height = args->height;
+ req->base.base_size.depth = 1;
+ req->base.array_size = 0;
+ req->base.mip_levels = 1;
+ req->base.multisample_count = 0;
+ req->base.buffer_handle = SVGA3D_INVALID_ID;
+ req->base.autogen_filter = SVGA3D_TEX_FILTER_NONE;
+ ret = vmw_gb_surface_define_ext_ioctl(dev, &arg, file_priv);
+ if (ret) {
+ drm_warn(dev, "Unable to create a dumb buffer\n");
+ return ret;
+ }
+
+ args->handle = arg.rep.buffer_handle;
+ args->size = arg.rep.buffer_size;
+ args->pitch = vmw_surface_calculate_pitch(desc, &drm_size);
+
+ ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg.rep.handle,
+ user_surface_converter,
+ &res);
+ if (ret) {
+ drm_err(dev, "Created resource handle doesn't exist!\n");
+ goto err;
+ }
+
+ vbo = res->guest_memory_bo;
+ vbo->is_dumb = true;
+ vbo->dumb_surface = vmw_res_to_srf(res);
+
+err:
+ if (res)
+ vmw_resource_unreference(&res);
+ if (ret)
+ ttm_ref_object_base_unref(tfile, arg.rep.handle);
+
+ return ret;
+}
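
For context, a hedged userspace-side sketch (not from this series) of how the
dumb-buffer callback above gets exercised through the generic DRM ioctl; the
example_ helper name is made up, while the ioctl and struct fields are the
standard libdrm/uapi ones.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>

/* Illustrative only: request a 32bpp dumb buffer; on vmwgfx this ends up
 * in vmw_dumb_create(), which fills in handle, pitch and size.
 */
static int example_create_dumb(int drm_fd, uint32_t width, uint32_t height,
			       struct drm_mode_create_dumb *out)
{
	memset(out, 0, sizeof(*out));
	out->width = width;
	out->height = height;
	out->bpp = 32;	/* maps to DRM_FORMAT_XRGB8888 on this driver */

	return drmIoctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, out);
}
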
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
index 3bfcf671fcd5..8651b788e98b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
@@ -75,7 +75,7 @@ done:
return ret;
}
-static int
+static void
compute_crc(struct drm_crtc *crtc,
struct vmw_surface *surf,
u32 *crc)
@@ -101,8 +101,6 @@ compute_crc(struct drm_crtc *crtc,
}
vmw_bo_unmap(bo);
-
- return 0;
}
static void
@@ -116,7 +114,6 @@ crc_generate_worker(struct work_struct *work)
u64 frame_start, frame_end;
u32 crc32 = 0;
struct vmw_surface *surf = 0;
- int ret;
spin_lock_irq(&du->vkms.crc_state_lock);
crc_pending = du->vkms.crc_pending;
@@ -130,22 +127,24 @@ crc_generate_worker(struct work_struct *work)
return;
spin_lock_irq(&du->vkms.crc_state_lock);
- surf = du->vkms.surface;
+ surf = vmw_surface_reference(du->vkms.surface);
spin_unlock_irq(&du->vkms.crc_state_lock);
- if (vmw_surface_sync(vmw, surf)) {
- drm_warn(crtc->dev, "CRC worker wasn't able to sync the crc surface!\n");
- return;
- }
+ if (surf) {
+ if (vmw_surface_sync(vmw, surf)) {
+ drm_warn(
+ crtc->dev,
+ "CRC worker wasn't able to sync the crc surface!\n");
+ return;
+ }
- ret = compute_crc(crtc, surf, &crc32);
- if (ret)
- return;
+ compute_crc(crtc, surf, &crc32);
+ vmw_surface_unreference(&surf);
+ }
spin_lock_irq(&du->vkms.crc_state_lock);
frame_start = du->vkms.frame_start;
frame_end = du->vkms.frame_end;
- crc_pending = du->vkms.crc_pending;
du->vkms.frame_start = 0;
du->vkms.frame_end = 0;
du->vkms.crc_pending = false;
@@ -164,7 +163,7 @@ vmw_vkms_vblank_simulate(struct hrtimer *timer)
struct vmw_display_unit *du = container_of(timer, struct vmw_display_unit, vkms.timer);
struct drm_crtc *crtc = &du->crtc;
struct vmw_private *vmw = vmw_priv(crtc->dev);
- struct vmw_surface *surf = NULL;
+ bool has_surface = false;
u64 ret_overrun;
bool locked, ret;
@@ -179,10 +178,10 @@ vmw_vkms_vblank_simulate(struct hrtimer *timer)
WARN_ON(!ret);
if (!locked)
return HRTIMER_RESTART;
- surf = du->vkms.surface;
+ has_surface = du->vkms.surface != NULL;
vmw_vkms_unlock(crtc);
- if (du->vkms.crc_enabled && surf) {
+ if (du->vkms.crc_enabled && has_surface) {
u64 frame = drm_crtc_accurate_vblank_count(crtc);
spin_lock(&du->vkms.crc_state_lock);
@@ -336,6 +335,8 @@ vmw_vkms_crtc_cleanup(struct drm_crtc *crtc)
{
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ if (du->vkms.surface)
+ vmw_surface_unreference(&du->vkms.surface);
WARN_ON(work_pending(&du->vkms.crc_generator_work));
hrtimer_cancel(&du->vkms.timer);
}
@@ -497,9 +498,12 @@ vmw_vkms_set_crc_surface(struct drm_crtc *crtc,
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
struct vmw_private *vmw = vmw_priv(crtc->dev);
- if (vmw->vkms_enabled) {
+ if (vmw->vkms_enabled && du->vkms.surface != surf) {
WARN_ON(atomic_read(&du->vkms.atomic_lock) != VMW_VKMS_LOCK_MODESET);
- du->vkms.surface = surf;
+ if (du->vkms.surface)
+ vmw_surface_unreference(&du->vkms.surface);
+ if (surf)
+ du->vkms.surface = vmw_surface_reference(surf);
}
}
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 628c245c4822..1ff9602a52f6 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -12,32 +12,15 @@ subdir-ccflags-$(CONFIG_DRM_XE_WERROR) += -Werror
subdir-ccflags-y += -I$(obj) -I$(src)
# generated sources
-hostprogs := xe_gen_wa_oob
+hostprogs := xe_gen_wa_oob
generated_oob := $(obj)/generated/xe_wa_oob.c $(obj)/generated/xe_wa_oob.h
-
quiet_cmd_wa_oob = GEN $(notdir $(generated_oob))
cmd_wa_oob = mkdir -p $(@D); $^ $(generated_oob)
-
$(obj)/generated/%_wa_oob.c $(obj)/generated/%_wa_oob.h: $(obj)/xe_gen_wa_oob \
$(src)/xe_wa_oob.rules
$(call cmd,wa_oob)
-uses_generated_oob := \
- $(obj)/xe_ggtt.o \
- $(obj)/xe_gsc.o \
- $(obj)/xe_gt.o \
- $(obj)/xe_guc.o \
- $(obj)/xe_guc_ads.o \
- $(obj)/xe_guc_pc.o \
- $(obj)/xe_migrate.o \
- $(obj)/xe_ring_ops.o \
- $(obj)/xe_vm.o \
- $(obj)/xe_wa.o \
- $(obj)/xe_ttm_stolen_mgr.o
-
-$(uses_generated_oob): $(generated_oob)
-
# Please keep these build lists sorted!
# core driver code
@@ -192,6 +175,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
display/xe_display.o \
display/xe_display_misc.o \
display/xe_display_rps.o \
+ display/xe_display_wa.o \
display/xe_dsb_buffer.o \
display/xe_fb_pin.o \
display/xe_hdcp_gsc.o \
@@ -320,3 +304,6 @@ quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
$(obj)/%.hdrtest: $(src)/%.h FORCE
$(call if_changed_dep,hdrtest)
+
+uses_generated_oob := $(addprefix $(obj)/, $(xe-y))
+$(uses_generated_oob): $(obj)/generated/xe_wa_oob.h
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index 816ad13821a8..cd8948c08661 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -10,6 +10,9 @@
#include "xe_bo.h"
#include "xe_gt.h"
#include "xe_ttm_stolen_mgr.h"
+#include "xe_wa.h"
+
+#include <generated/xe_wa_oob.h>
struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
@@ -37,7 +40,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
size = PAGE_ALIGN(size);
obj = ERR_PTR(-ENODEV);
- if (!IS_DGFX(xe)) {
+ if (!IS_DGFX(xe) && !XE_WA(xe_root_mmio_gt(xe), 22019338487_display)) {
obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe),
NULL, size,
ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
@@ -48,6 +51,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
else
drm_info(&xe->drm, "Allocated fbdev into stolen failed: %li\n", PTR_ERR(obj));
}
+
if (IS_ERR(obj)) {
obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, size,
ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
diff --git a/drivers/gpu/drm/xe/display/xe_display_wa.c b/drivers/gpu/drm/xe/display/xe_display_wa.c
new file mode 100644
index 000000000000..68e3d1959ad6
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_display_wa.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include "intel_display_wa.h"
+
+#include "xe_device.h"
+#include "xe_wa.h"
+
+#include <generated/xe_wa_oob.h>
+
+bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915)
+{
+ return XE_WA(xe_root_mmio_gt(i915), 16023588340);
+}
diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
index 9e860c61f4b3..ccd0d87d438a 100644
--- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
+++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
@@ -7,6 +7,8 @@
#include "intel_display_types.h"
#include "intel_dsb_buffer.h"
#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_device_types.h"
#include "xe_gt.h"
u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
@@ -16,7 +18,10 @@ u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
{
+ struct xe_device *xe = dsb_buf->vma->bo->tile->xe;
+
iosys_map_wr(&dsb_buf->vma->bo->vmap, idx * 4, u32, val);
+ xe_device_l2_flush(xe);
}
u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
@@ -26,9 +31,12 @@ u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
{
+ struct xe_device *xe = dsb_buf->vma->bo->tile->xe;
+
WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf));
iosys_map_memset(&dsb_buf->vma->bo->vmap, idx * 4, val, size);
+ xe_device_l2_flush(xe);
}
bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf, size_t size)
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index 423f367c7065..d7db44e79eaf 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -10,6 +10,7 @@
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "xe_bo.h"
+#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_pm.h"
@@ -304,6 +305,8 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
if (ret)
goto err_unpin;
+ /* Ensure DPT writes are flushed */
+ xe_device_l2_flush(xe);
return vma;
err_unpin:
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index 5eccd6abb3ef..a50ab9eae40a 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -18,6 +18,9 @@
#include "intel_frontbuffer.h"
#include "intel_plane_initial.h"
#include "xe_bo.h"
+#include "xe_wa.h"
+
+#include <generated/xe_wa_oob.h>
static bool
intel_reuse_initial_plane_obj(struct intel_crtc *this,
@@ -104,6 +107,9 @@ initial_plane_bo(struct xe_device *xe,
phys_base = base;
flags |= XE_BO_FLAG_STOLEN;
+ if (XE_WA(xe_root_mmio_gt(xe), 22019338487_display))
+ return NULL;
+
/*
* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index d44564bad009..3b87f95f9ecf 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -80,6 +80,9 @@
#define LE_CACHEABILITY_MASK REG_GENMASK(1, 0)
#define LE_CACHEABILITY(value) REG_FIELD_PREP(LE_CACHEABILITY_MASK, value)
+#define XE2_GAMREQSTRM_CTRL XE_REG(0x4194)
+#define CG_DIS_CNTLBUS REG_BIT(6)
+
#define CCS_AUX_INV XE_REG(0x4208)
#define VD0_AUX_INV XE_REG(0x4218)
@@ -88,6 +91,8 @@
#define VE1_AUX_INV XE_REG(0x42b8)
#define AUX_INV REG_BIT(0)
+#define XE2_LMEM_CFG XE_REG(0x48b0)
+
#define XEHP_TILE_ADDR_RANGE(_idx) XE_REG_MCR(0x4900 + (_idx) * 4)
#define XEHP_FLAT_CCS_BASE_ADDR XE_REG_MCR(0x4910)
#define XEHP_FLAT_CCS_PTR REG_GENMASK(31, 8)
@@ -103,6 +108,7 @@
#define FF_MODE XE_REG_MCR(0x6210)
#define DIS_TE_AUTOSTRIP REG_BIT(31)
+#define VS_HIT_MAX_VALUE_MASK REG_GENMASK(25, 20)
#define DIS_MESH_PARTIAL_AUTOSTRIP REG_BIT(16)
#define DIS_MESH_AUTOSTRIP REG_BIT(15)
@@ -372,6 +378,11 @@
#define XEHPC_L3CLOS_MASK(i) XE_REG_MCR(0xb194 + (i) * 8)
+#define XE2_GLOBAL_INVAL XE_REG(0xb404)
+
+#define SCRATCH1LPFC XE_REG(0xb474)
+#define EN_L3_RW_CCS_CACHE_FLUSH REG_BIT(0)
+
#define XE2LPM_L3SQCREG5 XE_REG_MCR(0xb658)
#define XE2_TDF_CTRL XE_REG(0xb418)
@@ -395,6 +406,10 @@
#define INVALIDATION_BROADCAST_MODE_DIS REG_BIT(12)
#define GLOBAL_INVALIDATION_MODE REG_BIT(2)
+#define LMEM_CFG XE_REG(0xcf58)
+#define LMEM_EN REG_BIT(31)
+#define LMTT_DIR_PTR REG_GENMASK(30, 0) /* in multiples of 64KB */
+
#define HALF_SLICE_CHICKEN5 XE_REG_MCR(0xe188, XE_REG_OPTION_MASKED)
#define DISABLE_SAMPLE_G_PERFORMANCE REG_BIT(0)
diff --git a/drivers/gpu/drm/xe/regs/xe_regs.h b/drivers/gpu/drm/xe/regs/xe_regs.h
index 23e33ec84902..dfa869f0dddd 100644
--- a/drivers/gpu/drm/xe/regs/xe_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_regs.h
@@ -15,8 +15,6 @@
#define GU_MISC_IRQ_OFFSET 0x444f0
#define GU_MISC_GSE REG_BIT(27)
-#define SOFTWARE_FLAGS_SPR33 XE_REG(0x4f084)
-
#define GU_CNTL_PROTECTED XE_REG(0x10100C)
#define DRIVERINT_FLR_DIS REG_BIT(31)
@@ -24,11 +22,14 @@
#define LMEM_INIT REG_BIT(7)
#define DRIVERFLR REG_BIT(31)
+#define XEHP_CLOCK_GATE_DIS XE_REG(0x101014)
+#define SGSI_SIDECLK_DIS REG_BIT(17)
+
#define GU_DEBUG XE_REG(0x101018)
#define DRIVERFLR_STATUS REG_BIT(31)
-#define XEHP_CLOCK_GATE_DIS XE_REG(0x101014)
-#define SGSI_SIDECLK_DIS REG_BIT(17)
+#define VIRTUAL_CTRL_REG XE_REG(0x10108c)
+#define GUEST_GTT_UPDATE_EN REG_BIT(8)
#define XEHP_MTCFG_ADDR XE_REG(0x101800)
#define TILE_COUNT REG_GENMASK(15, 8)
@@ -66,6 +67,9 @@
#define DISPLAY_IRQ REG_BIT(16)
#define GT_DW_IRQ(x) REG_BIT(x)
+#define VF_CAP_REG XE_REG(0x1901f8, XE_REG_OPTION_VF)
+#define VF_CAP REG_BIT(0)
+
#define PVC_RP_STATE_CAP XE_REG(0x281014)
#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_sriov_regs.h b/drivers/gpu/drm/xe/regs/xe_sriov_regs.h
deleted file mode 100644
index 017b4ddd1ecf..000000000000
--- a/drivers/gpu/drm/xe/regs/xe_sriov_regs.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _REGS_XE_SRIOV_REGS_H_
-#define _REGS_XE_SRIOV_REGS_H_
-
-#include "regs/xe_reg_defs.h"
-
-#define XE2_LMEM_CFG XE_REG(0x48b0)
-
-#define LMEM_CFG XE_REG(0xcf58)
-#define LMEM_EN REG_BIT(31)
-#define LMTT_DIR_PTR REG_GENMASK(30, 0) /* in multiples of 64KB */
-
-#define VIRTUAL_CTRL_REG XE_REG(0x10108c)
-#define GUEST_GTT_UPDATE_EN REG_BIT(8)
-
-#define VF_CAP_REG XE_REG(0x1901f8, XE_REG_OPTION_VF)
-#define VF_CAP REG_BIT(0)
-
-#endif
diff --git a/drivers/gpu/drm/xe/tests/Makefile b/drivers/gpu/drm/xe/tests/Makefile
index 6e58931fddd4..0e3408f4952c 100644
--- a/drivers/gpu/drm/xe/tests/Makefile
+++ b/drivers/gpu/drm/xe/tests/Makefile
@@ -2,11 +2,7 @@
# "live" kunit tests
obj-$(CONFIG_DRM_XE_KUNIT_TEST) += xe_live_test.o
-xe_live_test-y = xe_live_test_mod.o \
- xe_bo_test.o \
- xe_dma_buf_test.o \
- xe_migrate_test.o \
- xe_mocs_test.o
+xe_live_test-y = xe_live_test_mod.o
# Normal kunit tests
obj-$(CONFIG_DRM_XE_KUNIT_TEST) += xe_test.o
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 9f3c02826464..1768483da1b7 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -6,7 +6,7 @@
#include <kunit/test.h>
#include <kunit/visibility.h>
-#include "tests/xe_bo_test.h"
+#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
#include "tests/xe_test.h"
@@ -154,12 +154,18 @@ out_unlock:
static int ccs_test_run_device(struct xe_device *xe)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
struct xe_tile *tile;
int id;
if (!xe_device_has_flat_ccs(xe)) {
- kunit_info(test, "Skipping non-flat-ccs device.\n");
+ kunit_skip(test, "non-flat-ccs device\n");
+ return 0;
+ }
+
+ /* For xe2+ dgfx, we don't handle ccs metadata */
+ if (GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe)) {
+ kunit_skip(test, "xe2+ dgfx device\n");
return 0;
}
@@ -177,11 +183,12 @@ static int ccs_test_run_device(struct xe_device *xe)
return 0;
}
-void xe_ccs_migrate_kunit(struct kunit *test)
+static void xe_ccs_migrate_kunit(struct kunit *test)
{
- xe_call_for_each_device(ccs_test_run_device);
+ struct xe_device *xe = test->priv;
+
+ ccs_test_run_device(xe);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_ccs_migrate_kunit);
static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struct kunit *test)
{
@@ -325,13 +332,12 @@ cleanup_bo:
static int evict_test_run_device(struct xe_device *xe)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
struct xe_tile *tile;
int id;
if (!IS_DGFX(xe)) {
- kunit_info(test, "Skipping non-discrete device %s.\n",
- dev_name(xe->drm.dev));
+ kunit_skip(test, "non-discrete device\n");
return 0;
}
@@ -345,8 +351,23 @@ static int evict_test_run_device(struct xe_device *xe)
return 0;
}
-void xe_bo_evict_kunit(struct kunit *test)
+static void xe_bo_evict_kunit(struct kunit *test)
{
- xe_call_for_each_device(evict_test_run_device);
+ struct xe_device *xe = test->priv;
+
+ evict_test_run_device(xe);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_bo_evict_kunit);
+
+static struct kunit_case xe_bo_tests[] = {
+ KUNIT_CASE_PARAM(xe_ccs_migrate_kunit, xe_pci_live_device_gen_param),
+ KUNIT_CASE_PARAM(xe_bo_evict_kunit, xe_pci_live_device_gen_param),
+ {}
+};
+
+VISIBLE_IF_KUNIT
+struct kunit_suite xe_bo_test_suite = {
+ .name = "xe_bo",
+ .test_cases = xe_bo_tests,
+ .init = xe_kunit_helper_xe_device_live_test_init,
+};
+EXPORT_SYMBOL_IF_KUNIT(xe_bo_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_bo_test.c b/drivers/gpu/drm/xe/tests/xe_bo_test.c
deleted file mode 100644
index a324cde77db8..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_bo_test.c
+++ /dev/null
@@ -1,21 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#include "xe_bo_test.h"
-
-#include <kunit/test.h>
-
-static struct kunit_case xe_bo_tests[] = {
- KUNIT_CASE(xe_ccs_migrate_kunit),
- KUNIT_CASE(xe_bo_evict_kunit),
- {}
-};
-
-static struct kunit_suite xe_bo_test_suite = {
- .name = "xe_bo",
- .test_cases = xe_bo_tests,
-};
-
-kunit_test_suite(xe_bo_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_bo_test.h b/drivers/gpu/drm/xe/tests/xe_bo_test.h
deleted file mode 100644
index 0113ab45066a..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_bo_test.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 AND MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _XE_BO_TEST_H_
-#define _XE_BO_TEST_H_
-
-struct kunit;
-
-void xe_ccs_migrate_kunit(struct kunit *test);
-void xe_bo_evict_kunit(struct kunit *test);
-
-#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
index e7f9b531c465..c24c8509227e 100644
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
@@ -8,7 +8,7 @@
#include <kunit/test.h>
#include <kunit/visibility.h>
-#include "tests/xe_dma_buf_test.h"
+#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
#include "xe_pci.h"
@@ -107,7 +107,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
struct drm_gem_object *import;
struct dma_buf *dmabuf;
@@ -258,7 +258,7 @@ static const struct dma_buf_test_params test_params[] = {
static int dma_buf_run_device(struct xe_device *xe)
{
const struct dma_buf_test_params *params;
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
xe_pm_runtime_get(xe);
for (params = test_params; params->mem_mask; ++params) {
@@ -274,8 +274,22 @@ static int dma_buf_run_device(struct xe_device *xe)
return 0;
}
-void xe_dma_buf_kunit(struct kunit *test)
+static void xe_dma_buf_kunit(struct kunit *test)
{
- xe_call_for_each_device(dma_buf_run_device);
+ struct xe_device *xe = test->priv;
+
+ dma_buf_run_device(xe);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_dma_buf_kunit);
+
+static struct kunit_case xe_dma_buf_tests[] = {
+ KUNIT_CASE_PARAM(xe_dma_buf_kunit, xe_pci_live_device_gen_param),
+ {}
+};
+
+VISIBLE_IF_KUNIT
+struct kunit_suite xe_dma_buf_test_suite = {
+ .name = "xe_dma_buf",
+ .test_cases = xe_dma_buf_tests,
+ .init = xe_kunit_helper_xe_device_live_test_init,
+};
+EXPORT_SYMBOL_IF_KUNIT(xe_dma_buf_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c b/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c
deleted file mode 100644
index 99cdb718b6c6..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#include "xe_dma_buf_test.h"
-
-#include <kunit/test.h>
-
-static struct kunit_case xe_dma_buf_tests[] = {
- KUNIT_CASE(xe_dma_buf_kunit),
- {}
-};
-
-static struct kunit_suite xe_dma_buf_test_suite = {
- .name = "xe_dma_buf",
- .test_cases = xe_dma_buf_tests,
-};
-
-kunit_test_suite(xe_dma_buf_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.h b/drivers/gpu/drm/xe/tests/xe_dma_buf_test.h
deleted file mode 100644
index e6b464ddd526..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 AND MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _XE_DMA_BUF_TEST_H_
-#define _XE_DMA_BUF_TEST_H_
-
-struct kunit;
-
-void xe_dma_buf_kunit(struct kunit *test);
-
-#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c
index fefe79b3b75a..bc5156966ce9 100644
--- a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c
+++ b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.c
@@ -12,7 +12,9 @@
#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
+#include "xe_device.h"
#include "xe_device_types.h"
+#include "xe_pm.h"
/**
* xe_kunit_helper_alloc_xe_device - Allocate a &xe_device for a KUnit test.
@@ -88,3 +90,40 @@ int xe_kunit_helper_xe_device_test_init(struct kunit *test)
return 0;
}
EXPORT_SYMBOL_IF_KUNIT(xe_kunit_helper_xe_device_test_init);
+
+KUNIT_DEFINE_ACTION_WRAPPER(put_xe_pm_runtime, xe_pm_runtime_put, struct xe_device *);
+
+/**
+ * xe_kunit_helper_xe_device_live_test_init - Prepare a &xe_device for
+ * use in a live KUnit test.
+ * @test: the &kunit where live &xe_device will be used
+ *
+ * This function expects a pointer to the &xe_device in &test.param_value,
+ * as prepared by &xe_pci_live_device_gen_param, and stores that pointer
+ * as &kunit.priv so the test code can access it.
+ *
+ * This function makes sure the device is not wedged and then resumes it,
+ * so the device isn't woken up inside the test. It uses a deferred cleanup
+ * action to release the runtime_pm reference.
+ *
+ * This function can be used as custom implementation of &kunit_suite.init.
+ *
+ * This function uses KUNIT_ASSERT to detect any failures.
+ *
+ * Return: Always 0.
+ */
+int xe_kunit_helper_xe_device_live_test_init(struct kunit *test)
+{
+ struct xe_device *xe = xe_device_const_cast(test->param_value);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);
+ kunit_info(test, "running on %s device\n", xe->info.platform_name);
+
+ KUNIT_ASSERT_FALSE(test, xe_device_wedged(xe));
+ xe_pm_runtime_get(xe);
+ KUNIT_ASSERT_EQ(test, 0, kunit_add_action_or_reset(test, put_xe_pm_runtime, xe));
+
+ test->priv = xe;
+ return 0;
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_kunit_helper_xe_device_live_test_init);
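For reference, a minimal sketch of a live suite built on this helper, mirroring the converted suites in this series; the xe_sample names are hypothetical and not part of the patch:

#include <kunit/test.h>
#include <kunit/visibility.h>

#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
#include "xe_device.h"

static void xe_sample_kunit(struct kunit *test)
{
	/* set as test->priv by xe_kunit_helper_xe_device_live_test_init() */
	struct xe_device *xe = test->priv;

	KUNIT_EXPECT_FALSE(test, xe_device_wedged(xe));
}

static struct kunit_case xe_sample_tests[] = {
	KUNIT_CASE_PARAM(xe_sample_kunit, xe_pci_live_device_gen_param),
	{}
};

VISIBLE_IF_KUNIT
struct kunit_suite xe_sample_test_suite = {
	.name = "xe_sample",
	.test_cases = xe_sample_tests,
	.init = xe_kunit_helper_xe_device_live_test_init,
};
EXPORT_SYMBOL_IF_KUNIT(xe_sample_test_suite);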
diff --git a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h
index 067a1babf049..83665f7b1254 100644
--- a/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h
+++ b/drivers/gpu/drm/xe/tests/xe_kunit_helpers.h
@@ -14,4 +14,6 @@ struct xe_device *xe_kunit_helper_alloc_xe_device(struct kunit *test,
struct device *dev);
int xe_kunit_helper_xe_device_test_init(struct kunit *test);
+int xe_kunit_helper_xe_device_live_test_init(struct kunit *test);
+
#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
index eb1ea99a5a8b..5f14737c8210 100644
--- a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
+++ b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
@@ -3,6 +3,17 @@
* Copyright © 2023 Intel Corporation
*/
#include <linux/module.h>
+#include <kunit/test.h>
+
+extern struct kunit_suite xe_bo_test_suite;
+extern struct kunit_suite xe_dma_buf_test_suite;
+extern struct kunit_suite xe_migrate_test_suite;
+extern struct kunit_suite xe_mocs_test_suite;
+
+kunit_test_suite(xe_bo_test_suite);
+kunit_test_suite(xe_dma_buf_test_suite);
+kunit_test_suite(xe_migrate_test_suite);
+kunit_test_suite(xe_mocs_test_suite);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
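Registering the hypothetical xe_sample suite from the earlier sketch in this live-test module would follow the same two-line pattern used above:

extern struct kunit_suite xe_sample_test_suite;	/* hypothetical suite, see sketch above */

kunit_test_suite(xe_sample_test_suite);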
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index 962f6438e219..4344a1724029 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -6,7 +6,7 @@
#include <kunit/test.h>
#include <kunit/visibility.h>
-#include "tests/xe_migrate_test.h"
+#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
#include "xe_pci.h"
@@ -334,7 +334,7 @@ vunmap:
static int migrate_test_run_device(struct xe_device *xe)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
struct xe_tile *tile;
int id;
@@ -354,8 +354,422 @@ static int migrate_test_run_device(struct xe_device *xe)
return 0;
}
-void xe_migrate_sanity_kunit(struct kunit *test)
+static void xe_migrate_sanity_kunit(struct kunit *test)
{
- xe_call_for_each_device(migrate_test_run_device);
+ struct xe_device *xe = test->priv;
+
+ migrate_test_run_device(xe);
+}
+
+static struct dma_fence *blt_copy(struct xe_tile *tile,
+ struct xe_bo *src_bo, struct xe_bo *dst_bo,
+ bool copy_only_ccs, const char *str, struct kunit *test)
+{
+ struct xe_gt *gt = tile->primary_gt;
+ struct xe_migrate *m = tile->migrate;
+ struct xe_device *xe = gt_to_xe(gt);
+ struct dma_fence *fence = NULL;
+ u64 size = src_bo->size;
+ struct xe_res_cursor src_it, dst_it;
+ struct ttm_resource *src = src_bo->ttm.resource, *dst = dst_bo->ttm.resource;
+ u64 src_L0_ofs, dst_L0_ofs;
+ u32 src_L0_pt, dst_L0_pt;
+ u64 src_L0, dst_L0;
+ int err;
+ bool src_is_vram = mem_type_is_vram(src->mem_type);
+ bool dst_is_vram = mem_type_is_vram(dst->mem_type);
+
+ if (!src_is_vram)
+ xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
+ else
+ xe_res_first(src, 0, size, &src_it);
+
+ if (!dst_is_vram)
+ xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it);
+ else
+ xe_res_first(dst, 0, size, &dst_it);
+
+ while (size) {
+ u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
+ struct xe_sched_job *job;
+ struct xe_bb *bb;
+ u32 flush_flags = 0;
+ u32 update_idx;
+ u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
+ u32 pte_flags;
+
+ src_L0 = xe_migrate_res_sizes(m, &src_it);
+ dst_L0 = xe_migrate_res_sizes(m, &dst_it);
+
+ src_L0 = min(src_L0, dst_L0);
+
+ pte_flags = src_is_vram ? (PTE_UPDATE_FLAG_IS_VRAM |
+ PTE_UPDATE_FLAG_IS_COMP_PTE) : 0;
+ batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0,
+ &src_L0_ofs, &src_L0_pt, 0, 0,
+ avail_pts);
+
+ pte_flags = dst_is_vram ? (PTE_UPDATE_FLAG_IS_VRAM |
+ PTE_UPDATE_FLAG_IS_COMP_PTE) : 0;
+ batch_size += pte_update_size(m, pte_flags, dst, &dst_it, &src_L0,
+ &dst_L0_ofs, &dst_L0_pt, 0,
+ avail_pts, avail_pts);
+
+ /* Add copy commands size here */
+ batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
+ ((xe_device_has_flat_ccs(xe) && copy_only_ccs) ? EMIT_COPY_CCS_DW : 0);
+
+ bb = xe_bb_new(gt, batch_size, xe->info.has_usm);
+ if (IS_ERR(bb)) {
+ err = PTR_ERR(bb);
+ goto err_sync;
+ }
+
+ if (src_is_vram)
+ xe_res_next(&src_it, src_L0);
+ else
+ emit_pte(m, bb, src_L0_pt, src_is_vram, false,
+ &src_it, src_L0, src);
+
+ if (dst_is_vram)
+ xe_res_next(&dst_it, src_L0);
+ else
+ emit_pte(m, bb, dst_L0_pt, dst_is_vram, false,
+ &dst_it, src_L0, dst);
+
+ bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
+ update_idx = bb->len;
+ if (!copy_only_ccs)
+ emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
+
+ if (copy_only_ccs)
+ flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
+ src_is_vram, dst_L0_ofs,
+ dst_is_vram, src_L0, dst_L0_ofs,
+ copy_only_ccs);
+
+ job = xe_bb_create_migration_job(m->q, bb,
+ xe_migrate_batch_base(m, xe->info.has_usm),
+ update_idx);
+ if (IS_ERR(job)) {
+ err = PTR_ERR(job);
+ goto err;
+ }
+
+ xe_sched_job_add_migrate_flush(job, flush_flags);
+
+ mutex_lock(&m->job_mutex);
+ xe_sched_job_arm(job);
+ dma_fence_put(fence);
+ fence = dma_fence_get(&job->drm.s_fence->finished);
+ xe_sched_job_push(job);
+
+ dma_fence_put(m->fence);
+ m->fence = dma_fence_get(fence);
+
+ mutex_unlock(&m->job_mutex);
+
+ xe_bb_free(bb, fence);
+ size -= src_L0;
+ continue;
+
+err:
+ xe_bb_free(bb, NULL);
+
+err_sync:
+ if (fence) {
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ }
+ return ERR_PTR(err);
+ }
+
+ return fence;
+}
+
+static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
+ struct xe_bo *sys_bo, struct xe_bo *vram_bo, struct xe_bo *ccs_bo,
+ struct kunit *test)
+{
+ struct dma_fence *fence;
+ u64 expected, retval;
+ long timeout;
+ long ret;
+
+ expected = 0xd0d0d0d0d0d0d0d0;
+ xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, sys_bo->size);
+
+ fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test);
+ if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) {
+ retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
+ if (retval == expected)
+ KUNIT_FAIL(test, "Sanity check failed: VRAM must have compressed value\n");
+ }
+ dma_fence_put(fence);
+
+ kunit_info(test, "Evict vram buffer object\n");
+ ret = xe_bo_evict(vram_bo, true);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to evict bo.\n");
+ return;
+ }
+
+ ret = xe_bo_vmap(vram_bo);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret);
+ return;
+ }
+
+ retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
+ check(retval, expected, "Clear evicted vram data first value", test);
+ retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64);
+ check(retval, expected, "Clear evicted vram data last value", test);
+
+ fence = blt_copy(tile, vram_bo, ccs_bo,
+ true, "Blit surf copy from vram to sysmem", test);
+ if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
+ retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64);
+ check(retval, 0, "Clear ccs data first value", test);
+
+ retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64);
+ check(retval, 0, "Clear ccs data last value", test);
+ }
+ dma_fence_put(fence);
+
+ kunit_info(test, "Restore vram buffer object\n");
+ ret = xe_bo_validate(vram_bo, NULL, false);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to validate vram bo for: %li\n", ret);
+ return;
+ }
+
+ /* Sync all migration blits */
+ timeout = dma_resv_wait_timeout(vram_bo->ttm.base.resv,
+ DMA_RESV_USAGE_KERNEL,
+ true,
+ 5 * HZ);
+ if (timeout <= 0) {
+ KUNIT_FAIL(test, "Failed to sync bo eviction.\n");
+ return;
+ }
+
+ ret = xe_bo_vmap(vram_bo);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret);
+ return;
+ }
+
+ retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
+ check(retval, expected, "Restored value must be equal to initial value", test);
+ retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64);
+ check(retval, expected, "Restored value must be equal to initial value", test);
+
+ fence = blt_copy(tile, vram_bo, ccs_bo,
+ true, "Blit surf copy from vram to sysmem", test);
+ if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
+ retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64);
+ check(retval, 0, "Clear ccs data first value", test);
+ retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64);
+ check(retval, 0, "Clear ccs data last value", test);
+ }
+ dma_fence_put(fence);
+}
+
+static void test_clear(struct xe_device *xe, struct xe_tile *tile,
+ struct xe_bo *sys_bo, struct xe_bo *vram_bo, struct kunit *test)
+{
+ struct dma_fence *fence;
+ u64 expected, retval;
+
+ expected = 0xd0d0d0d0d0d0d0d0;
+ xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, sys_bo->size);
+
+ fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test);
+ if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) {
+ retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
+ if (retval == expected)
+ KUNIT_FAIL(test, "Sanity check failed: VRAM must have compressed value\n");
+ }
+ dma_fence_put(fence);
+
+ fence = blt_copy(tile, vram_bo, sys_bo, false, "Blit copy from vram to sysmem", test);
+ if (!sanity_fence_failed(xe, fence, "Blit copy from vram to sysmem", test)) {
+ retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
+ check(retval, expected, "Decompressed value must be equal to initial value", test);
+ retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
+ check(retval, expected, "Decompressed value must be equal to initial value", test);
+ }
+ dma_fence_put(fence);
+
+ kunit_info(test, "Clear vram buffer object\n");
+ expected = 0x0000000000000000;
+ fence = xe_migrate_clear(tile->migrate, vram_bo, vram_bo->ttm.resource);
+ if (sanity_fence_failed(xe, fence, "Clear vram_bo", test))
+ return;
+ dma_fence_put(fence);
+
+ fence = blt_copy(tile, vram_bo, sys_bo,
+ false, "Blit copy from vram to sysmem", test);
+ if (!sanity_fence_failed(xe, fence, "Clear main buffer data", test)) {
+ retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
+ check(retval, expected, "Clear main buffer first value", test);
+ retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
+ check(retval, expected, "Clear main buffer last value", test);
+ }
+ dma_fence_put(fence);
+
+ fence = blt_copy(tile, vram_bo, sys_bo,
+ true, "Blit surf copy from vram to sysmem", test);
+ if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
+ retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64);
+ check(retval, expected, "Clear ccs data first value", test);
+ retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64);
+ check(retval, expected, "Clear ccs data last value", test);
+ }
+ dma_fence_put(fence);
+}
+
+static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
+ struct kunit *test)
+{
+ struct xe_bo *sys_bo, *vram_bo = NULL, *ccs_bo = NULL;
+ unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
+ long ret;
+
+ sys_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
+ DRM_XE_GEM_CPU_CACHING_WC, ttm_bo_type_device,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+
+ if (IS_ERR(sys_bo)) {
+ KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
+ PTR_ERR(sys_bo));
+ return;
+ }
+
+ xe_bo_lock(sys_bo, false);
+ ret = xe_bo_validate(sys_bo, NULL, false);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to validate system bo for: %li\n", ret);
+ goto free_sysbo;
+ }
+
+ ret = xe_bo_vmap(sys_bo);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to vmap system bo: %li\n", ret);
+ goto free_sysbo;
+ }
+ xe_bo_unlock(sys_bo);
+
+ ccs_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M, DRM_XE_GEM_CPU_CACHING_WC,
+ ttm_bo_type_device, bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+
+ if (IS_ERR(ccs_bo)) {
+ KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
+ PTR_ERR(ccs_bo));
+ return;
+ }
+
+ xe_bo_lock(ccs_bo, false);
+ ret = xe_bo_validate(ccs_bo, NULL, false);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to validate system bo for: %li\n", ret);
+ goto free_ccsbo;
+ }
+
+ ret = xe_bo_vmap(ccs_bo);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to vmap system bo: %li\n", ret);
+ goto free_ccsbo;
+ }
+ xe_bo_unlock(ccs_bo);
+
+ vram_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M, DRM_XE_GEM_CPU_CACHING_WC,
+ ttm_bo_type_device, bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ if (IS_ERR(vram_bo)) {
+ KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
+ PTR_ERR(vram_bo));
+ return;
+ }
+
+ xe_bo_lock(vram_bo, false);
+ ret = xe_bo_validate(vram_bo, NULL, false);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to validate vram bo for: %li\n", ret);
+ goto free_vrambo;
+ }
+
+ ret = xe_bo_vmap(vram_bo);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret);
+ goto free_vrambo;
+ }
+
+ test_clear(xe, tile, sys_bo, vram_bo, test);
+ test_migrate(xe, tile, sys_bo, vram_bo, ccs_bo, test);
+ xe_bo_unlock(vram_bo);
+
+ xe_bo_lock(vram_bo, false);
+ xe_bo_vunmap(vram_bo);
+ xe_bo_unlock(vram_bo);
+
+ xe_bo_lock(ccs_bo, false);
+ xe_bo_vunmap(ccs_bo);
+ xe_bo_unlock(ccs_bo);
+
+ xe_bo_lock(sys_bo, false);
+ xe_bo_vunmap(sys_bo);
+ xe_bo_unlock(sys_bo);
+free_vrambo:
+ xe_bo_put(vram_bo);
+free_ccsbo:
+ xe_bo_put(ccs_bo);
+free_sysbo:
+ xe_bo_put(sys_bo);
+}
+
+static int validate_ccs_test_run_device(struct xe_device *xe)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct xe_tile *tile;
+ int id;
+
+ if (!xe_device_has_flat_ccs(xe)) {
+ kunit_skip(test, "non-flat-ccs device\n");
+ return 0;
+ }
+
+ if (!(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe))) {
+ kunit_skip(test, "non-xe2 discrete device\n");
+ return 0;
+ }
+
+ xe_pm_runtime_get(xe);
+
+ for_each_tile(tile, xe, id)
+ validate_ccs_test_run_tile(xe, tile, test);
+
+ xe_pm_runtime_put(xe);
+
+ return 0;
}
-EXPORT_SYMBOL_IF_KUNIT(xe_migrate_sanity_kunit);
+
+static void xe_validate_ccs_kunit(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+
+ validate_ccs_test_run_device(xe);
+}
+
+static struct kunit_case xe_migrate_tests[] = {
+ KUNIT_CASE_PARAM(xe_migrate_sanity_kunit, xe_pci_live_device_gen_param),
+ KUNIT_CASE_PARAM(xe_validate_ccs_kunit, xe_pci_live_device_gen_param),
+ {}
+};
+
+VISIBLE_IF_KUNIT
+struct kunit_suite xe_migrate_test_suite = {
+ .name = "xe_migrate",
+ .test_cases = xe_migrate_tests,
+ .init = xe_kunit_helper_xe_device_live_test_init,
+};
+EXPORT_SYMBOL_IF_KUNIT(xe_migrate_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate_test.c b/drivers/gpu/drm/xe/tests/xe_migrate_test.c
deleted file mode 100644
index eb0d8963419c..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_migrate_test.c
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#include "xe_migrate_test.h"
-
-#include <kunit/test.h>
-
-static struct kunit_case xe_migrate_tests[] = {
- KUNIT_CASE(xe_migrate_sanity_kunit),
- {}
-};
-
-static struct kunit_suite xe_migrate_test_suite = {
- .name = "xe_migrate",
- .test_cases = xe_migrate_tests,
-};
-
-kunit_test_suite(xe_migrate_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate_test.h b/drivers/gpu/drm/xe/tests/xe_migrate_test.h
deleted file mode 100644
index 7c645c66824f..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_migrate_test.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 AND MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _XE_MIGRATE_TEST_H_
-#define _XE_MIGRATE_TEST_H_
-
-struct kunit;
-
-void xe_migrate_sanity_kunit(struct kunit *test);
-
-#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
index 67c65e88c384..79be73b4a02b 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
@@ -6,7 +6,7 @@
#include <kunit/test.h>
#include <kunit/visibility.h>
-#include "tests/xe_mocs_test.h"
+#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
#include "tests/xe_test.h"
@@ -23,7 +23,7 @@ struct live_mocs {
static int live_mocs_init(struct live_mocs *arg, struct xe_gt *gt)
{
unsigned int flags;
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
memset(arg, 0, sizeof(*arg));
@@ -41,7 +41,7 @@ static int live_mocs_init(struct live_mocs *arg, struct xe_gt *gt)
static void read_l3cc_table(struct xe_gt *gt,
const struct xe_mocs_info *info)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
u32 l3cc, l3cc_expected;
unsigned int i;
u32 reg_val;
@@ -78,7 +78,7 @@ static void read_l3cc_table(struct xe_gt *gt,
static void read_mocs_table(struct xe_gt *gt,
const struct xe_mocs_info *info)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
u32 mocs, mocs_expected;
unsigned int i;
u32 reg_val;
@@ -134,11 +134,15 @@ static int mocs_kernel_test_run_device(struct xe_device *xe)
return 0;
}
-void xe_live_mocs_kernel_kunit(struct kunit *test)
+static void xe_live_mocs_kernel_kunit(struct kunit *test)
{
- xe_call_for_each_device(mocs_kernel_test_run_device);
+ struct xe_device *xe = test->priv;
+
+ if (IS_SRIOV_VF(xe))
+ kunit_skip(test, "this test is N/A for VF");
+
+ mocs_kernel_test_run_device(xe);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_live_mocs_kernel_kunit);
static int mocs_reset_test_run_device(struct xe_device *xe)
{
@@ -148,7 +152,7 @@ static int mocs_reset_test_run_device(struct xe_device *xe)
struct xe_gt *gt;
unsigned int flags;
int id;
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
xe_pm_runtime_get(xe);
@@ -175,8 +179,26 @@ static int mocs_reset_test_run_device(struct xe_device *xe)
return 0;
}
-void xe_live_mocs_reset_kunit(struct kunit *test)
+static void xe_live_mocs_reset_kunit(struct kunit *test)
{
- xe_call_for_each_device(mocs_reset_test_run_device);
+ struct xe_device *xe = test->priv;
+
+ if (IS_SRIOV_VF(xe))
+ kunit_skip(test, "this test is N/A for VF");
+
+ mocs_reset_test_run_device(xe);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_live_mocs_reset_kunit);
+
+static struct kunit_case xe_mocs_tests[] = {
+ KUNIT_CASE_PARAM(xe_live_mocs_kernel_kunit, xe_pci_live_device_gen_param),
+ KUNIT_CASE_PARAM(xe_live_mocs_reset_kunit, xe_pci_live_device_gen_param),
+ {}
+};
+
+VISIBLE_IF_KUNIT
+struct kunit_suite xe_mocs_test_suite = {
+ .name = "xe_mocs",
+ .test_cases = xe_mocs_tests,
+ .init = xe_kunit_helper_xe_device_live_test_init,
+};
+EXPORT_SYMBOL_IF_KUNIT(xe_mocs_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.c b/drivers/gpu/drm/xe/tests/xe_mocs_test.c
deleted file mode 100644
index 6315886b659e..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_mocs_test.c
+++ /dev/null
@@ -1,21 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#include "xe_mocs_test.h"
-
-#include <kunit/test.h>
-
-static struct kunit_case xe_mocs_tests[] = {
- KUNIT_CASE(xe_live_mocs_kernel_kunit),
- KUNIT_CASE(xe_live_mocs_reset_kunit),
- {}
-};
-
-static struct kunit_suite xe_mocs_test_suite = {
- .name = "xe_mocs",
- .test_cases = xe_mocs_tests,
-};
-
-kunit_test_suite(xe_mocs_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.h b/drivers/gpu/drm/xe/tests/xe_mocs_test.h
deleted file mode 100644
index e7699d495411..000000000000
--- a/drivers/gpu/drm/xe/tests/xe_mocs_test.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 AND MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _XE_MOCS_TEST_H_
-#define _XE_MOCS_TEST_H_
-
-struct kunit;
-
-void xe_live_mocs_kernel_kunit(struct kunit *test);
-void xe_live_mocs_reset_kunit(struct kunit *test);
-
-#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c
index f62809ca8b51..577ee7d14381 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci.c
@@ -167,3 +167,33 @@ done:
return 0;
}
EXPORT_SYMBOL_IF_KUNIT(xe_pci_fake_device_init);
+
+/**
+ * xe_pci_live_device_gen_param - Helper to iterate Xe devices as KUnit parameters
+ * @prev: the previously returned value, or NULL for the first iteration
+ * @desc: the buffer for a parameter name
+ *
+ * Iterates over the available Xe devices on the system. Uses the device name
+ * as the parameter name.
+ *
+ * To be used only as a parameter generator function in &KUNIT_CASE_PARAM.
+ *
+ * Return: pointer to the next &struct xe_device ready to be used as a parameter
+ * or NULL if there are no more Xe devices on the system.
+ */
+const void *xe_pci_live_device_gen_param(const void *prev, char *desc)
+{
+ const struct xe_device *xe = prev;
+ struct device *dev = xe ? xe->drm.dev : NULL;
+ struct device *next;
+
+ next = driver_find_next_device(&xe_pci_driver.driver, dev);
+ if (dev)
+ put_device(dev);
+ if (!next)
+ return NULL;
+
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s", dev_name(next));
+ return pdev_to_xe_device(to_pci_dev(next));
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_pci_live_device_gen_param);
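A rough sketch of the generator contract that KUNIT_CASE_PARAM relies on; KUnit itself drives the iteration, so the loop and helper name below are purely illustrative:

#include <kunit/test.h>
#include <linux/printk.h>

#include "tests/xe_pci_test.h"

static void example_list_live_devices(void)
{
	char desc[KUNIT_PARAM_DESC_SIZE];
	const void *param = NULL;

	/* first call receives prev == NULL; iteration ends on a NULL return */
	while ((param = xe_pci_live_device_gen_param(param, desc)))
		pr_info("live device parameter: %s\n", desc);
}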
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.c b/drivers/gpu/drm/xe/tests/xe_pci_test.c
index a6705a536391..744a37583d2d 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.c
@@ -16,7 +16,7 @@
static void check_graphics_ip(const struct xe_graphics_desc *graphics)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
u64 mask = graphics->hw_engine_mask;
/* RCS, CCS, and BCS engines are allowed on the graphics IP */
@@ -30,7 +30,7 @@ static void check_graphics_ip(const struct xe_graphics_desc *graphics)
static void check_media_ip(const struct xe_media_desc *media)
{
- struct kunit *test = xe_cur_kunit();
+ struct kunit *test = kunit_get_current_test();
u64 mask = media->hw_engine_mask;
/* VCS, VECS and GSCCS engines are allowed on the media IP */
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.h b/drivers/gpu/drm/xe/tests/xe_pci_test.h
index f40dcec83992..3e2558bc3c90 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.h
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.h
@@ -35,4 +35,6 @@ struct xe_pci_fake_data {
int xe_pci_fake_device_init(struct xe_device *xe);
+const void *xe_pci_live_device_gen_param(const void *prev, char *desc);
+
#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_rtp_test.c b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
index f217445c246a..36a3b5420fef 100644
--- a/drivers/gpu/drm/xe/tests/xe_rtp_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
@@ -31,16 +31,23 @@
#undef XE_REG_MCR
#define XE_REG_MCR(...) XE_REG(__VA_ARGS__, .mcr = 1)
-struct rtp_test_case {
+struct rtp_to_sr_test_case {
const char *name;
struct xe_reg expected_reg;
u32 expected_set_bits;
u32 expected_clr_bits;
- unsigned long expected_count;
+ unsigned long expected_count_sr_entries;
unsigned int expected_sr_errors;
+ unsigned long expected_active;
const struct xe_rtp_entry_sr *entries;
};
+struct rtp_test_case {
+ const char *name;
+ unsigned long expected_active;
+ const struct xe_rtp_entry *entries;
+};
+
static bool match_yes(const struct xe_gt *gt, const struct xe_hw_engine *hwe)
{
return true;
@@ -51,13 +58,14 @@ static bool match_no(const struct xe_gt *gt, const struct xe_hw_engine *hwe)
return false;
}
-static const struct rtp_test_case cases[] = {
+static const struct rtp_to_sr_test_case rtp_to_sr_cases[] = {
{
.name = "coalesce-same-reg",
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0) | REG_BIT(1),
.expected_clr_bits = REG_BIT(0) | REG_BIT(1),
- .expected_count = 1,
+ .expected_active = BIT(0) | BIT(1),
+ .expected_count_sr_entries = 1,
/* Different bits on the same register: create a single entry */
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -76,7 +84,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(0),
- .expected_count = 1,
+ .expected_active = BIT(0),
+ .expected_count_sr_entries = 1,
/* Don't coalesce second entry since rules don't match */
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -95,7 +104,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0) | REG_BIT(1) | REG_BIT(2),
.expected_clr_bits = REG_BIT(0) | REG_BIT(1) | REG_BIT(2),
- .expected_count = 1,
+ .expected_active = BIT(0) | BIT(1) | BIT(2),
+ .expected_count_sr_entries = 1,
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("first"),
XE_RTP_RULES(FUNC(match_yes), OR, FUNC(match_no)),
@@ -121,7 +131,7 @@ static const struct rtp_test_case cases[] = {
{
.name = "match-or-xfail",
.expected_reg = REGULAR_REG1,
- .expected_count = 0,
+ .expected_count_sr_entries = 0,
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("leading-or"),
XE_RTP_RULES(OR, FUNC(match_yes)),
@@ -148,7 +158,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(0),
- .expected_count = 1,
+ .expected_active = BIT(0),
+ .expected_count_sr_entries = 1,
/* Don't coalesce second entry due to one of the rules */
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -167,7 +178,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(0),
- .expected_count = 2,
+ .expected_active = BIT(0) | BIT(1),
+ .expected_count_sr_entries = 2,
/* Same bits on different registers are not coalesced */
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -186,7 +198,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(1) | REG_BIT(0),
- .expected_count = 1,
+ .expected_active = BIT(0) | BIT(1),
+ .expected_count_sr_entries = 1,
/* Check clr vs set actions on different bits */
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -207,7 +220,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = TEMP_FIELD,
.expected_clr_bits = TEMP_MASK,
- .expected_count = 1,
+ .expected_active = BIT(0),
+ .expected_count_sr_entries = 1,
/* Check FIELD_SET works */
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -225,7 +239,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(0),
- .expected_count = 1,
+ .expected_active = BIT(0) | BIT(1),
+ .expected_count_sr_entries = 1,
.expected_sr_errors = 1,
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -245,7 +260,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(0),
- .expected_count = 1,
+ .expected_active = BIT(0) | BIT(1),
+ .expected_count_sr_entries = 1,
.expected_sr_errors = 1,
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -265,7 +281,8 @@ static const struct rtp_test_case cases[] = {
.expected_reg = REGULAR_REG1,
.expected_set_bits = REG_BIT(0),
.expected_clr_bits = REG_BIT(0),
- .expected_count = 1,
+ .expected_active = BIT(0) | BIT(1) | BIT(2),
+ .expected_count_sr_entries = 1,
.expected_sr_errors = 2,
.entries = (const struct xe_rtp_entry_sr[]) {
{ XE_RTP_NAME("basic-1"),
@@ -287,28 +304,35 @@ static const struct rtp_test_case cases[] = {
},
};
-static void xe_rtp_process_tests(struct kunit *test)
+static void xe_rtp_process_to_sr_tests(struct kunit *test)
{
- const struct rtp_test_case *param = test->param_value;
+ const struct rtp_to_sr_test_case *param = test->param_value;
struct xe_device *xe = test->priv;
struct xe_gt *gt = xe_device_get_root_tile(xe)->primary_gt;
struct xe_reg_sr *reg_sr = &gt->reg_sr;
const struct xe_reg_sr_entry *sre, *sr_entry = NULL;
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
- unsigned long idx, count = 0;
+ unsigned long idx, count_sr_entries = 0, count_rtp_entries = 0, active = 0;
+
+ xe_reg_sr_init(reg_sr, "xe_rtp_to_sr_tests", xe);
+
+ while (param->entries[count_rtp_entries].rules)
+ count_rtp_entries++;
- xe_reg_sr_init(reg_sr, "xe_rtp_tests", xe);
+ xe_rtp_process_ctx_enable_active_tracking(&ctx, &active, count_rtp_entries);
xe_rtp_process_to_sr(&ctx, param->entries, reg_sr);
xa_for_each(&reg_sr->xa, idx, sre) {
if (idx == param->expected_reg.addr)
sr_entry = sre;
- count++;
+ count_sr_entries++;
}
- KUNIT_EXPECT_EQ(test, count, param->expected_count);
- if (count) {
+ KUNIT_EXPECT_EQ(test, active, param->expected_active);
+
+ KUNIT_EXPECT_EQ(test, count_sr_entries, param->expected_count_sr_entries);
+ if (count_sr_entries) {
KUNIT_EXPECT_EQ(test, sr_entry->clr_bits, param->expected_clr_bits);
KUNIT_EXPECT_EQ(test, sr_entry->set_bits, param->expected_set_bits);
KUNIT_EXPECT_EQ(test, sr_entry->reg.raw, param->expected_reg.raw);
@@ -319,12 +343,162 @@ static void xe_rtp_process_tests(struct kunit *test)
KUNIT_EXPECT_EQ(test, reg_sr->errors, param->expected_sr_errors);
}
+/*
+ * Entries below follow the logic used with xe_wa_oob.rules:
+ * 1) Entries with an empty name are OR'ed: all entries since the last entry
+ *    with a name are marked active
+ * 2) There are no actions associated with the rules
+ */
+static const struct rtp_test_case rtp_cases[] = {
+ {
+ .name = "active1",
+ .expected_active = BIT(0),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "active2",
+ .expected_active = BIT(0) | BIT(1),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ },
+ { XE_RTP_NAME("r2"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "active-inactive",
+ .expected_active = BIT(0),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ },
+ { XE_RTP_NAME("r2"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "inactive-active",
+ .expected_active = BIT(1),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ { XE_RTP_NAME("r2"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "inactive-1st_or_active-inactive",
+ .expected_active = BIT(1),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ { XE_RTP_NAME("r2_or_conditions"),
+ XE_RTP_RULES(FUNC(match_yes), OR,
+ FUNC(match_no), OR,
+ FUNC(match_no)) },
+ { XE_RTP_NAME("r3"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "inactive-2nd_or_active-inactive",
+ .expected_active = BIT(1),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ { XE_RTP_NAME("r2_or_conditions"),
+ XE_RTP_RULES(FUNC(match_no), OR,
+ FUNC(match_yes), OR,
+ FUNC(match_no)) },
+ { XE_RTP_NAME("r3"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "inactive-last_or_active-inactive",
+ .expected_active = BIT(1),
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ { XE_RTP_NAME("r2_or_conditions"),
+ XE_RTP_RULES(FUNC(match_no), OR,
+ FUNC(match_no), OR,
+ FUNC(match_yes)) },
+ { XE_RTP_NAME("r3"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ {}
+ },
+ },
+ {
+ .name = "inactive-no_or_active-inactive",
+ .expected_active = 0,
+ .entries = (const struct xe_rtp_entry[]) {
+ { XE_RTP_NAME("r1"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ { XE_RTP_NAME("r2_or_conditions"),
+ XE_RTP_RULES(FUNC(match_no), OR,
+ FUNC(match_no), OR,
+ FUNC(match_no)) },
+ { XE_RTP_NAME("r3"),
+ XE_RTP_RULES(FUNC(match_no)),
+ },
+ {}
+ },
+ },
+};
+
+static void xe_rtp_process_tests(struct kunit *test)
+{
+ const struct rtp_test_case *param = test->param_value;
+ struct xe_device *xe = test->priv;
+ struct xe_gt *gt = xe_device_get_root_tile(xe)->primary_gt;
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
+ unsigned long count_rtp_entries = 0, active = 0;
+
+ while (param->entries[count_rtp_entries].rules)
+ count_rtp_entries++;
+
+ xe_rtp_process_ctx_enable_active_tracking(&ctx, &active, count_rtp_entries);
+ xe_rtp_process(&ctx, param->entries);
+
+ KUNIT_EXPECT_EQ(test, active, param->expected_active);
+}
+
+static void rtp_to_sr_desc(const struct rtp_to_sr_test_case *t, char *desc)
+{
+ strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(rtp_to_sr, rtp_to_sr_cases, rtp_to_sr_desc);
+
static void rtp_desc(const struct rtp_test_case *t, char *desc)
{
strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
}
-KUNIT_ARRAY_PARAM(rtp, cases, rtp_desc);
+KUNIT_ARRAY_PARAM(rtp, rtp_cases, rtp_desc);
static int xe_rtp_test_init(struct kunit *test)
{
@@ -357,6 +531,7 @@ static void xe_rtp_test_exit(struct kunit *test)
}
static struct kunit_case xe_rtp_tests[] = {
+ KUNIT_CASE_PARAM(xe_rtp_process_to_sr_tests, rtp_to_sr_gen_params),
KUNIT_CASE_PARAM(xe_rtp_process_tests, rtp_gen_params),
{}
};
diff --git a/drivers/gpu/drm/xe/tests/xe_test.h b/drivers/gpu/drm/xe/tests/xe_test.h
index 7a1ae213e750..9c23ad9dba8d 100644
--- a/drivers/gpu/drm/xe/tests/xe_test.h
+++ b/drivers/gpu/drm/xe/tests/xe_test.h
@@ -9,8 +9,8 @@
#include <linux/types.h>
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
-#include <linux/sched.h>
#include <kunit/test.h>
+#include <kunit/test-bug.h>
/*
* Each test that provides a kunit private test structure, place a test id
@@ -31,8 +31,6 @@ struct xe_test_priv {
#define XE_TEST_DECLARE(x) x
#define XE_TEST_ONLY(x) unlikely(x)
-#define XE_TEST_EXPORT
-#define xe_cur_kunit() current->kunit_test
/**
* xe_cur_kunit_priv - Obtain the struct xe_test_priv pointed to by
@@ -48,10 +46,10 @@ xe_cur_kunit_priv(enum xe_test_priv_id id)
{
struct xe_test_priv *priv;
- if (!xe_cur_kunit())
+ if (!kunit_get_current_test())
return NULL;
- priv = xe_cur_kunit()->priv;
+ priv = kunit_get_current_test()->priv;
return priv->id == id ? priv : NULL;
}
@@ -59,8 +57,6 @@ xe_cur_kunit_priv(enum xe_test_priv_id id)
#define XE_TEST_DECLARE(x)
#define XE_TEST_ONLY(x) 0
-#define XE_TEST_EXPORT static
-#define xe_cur_kunit() NULL
#define xe_cur_kunit_priv(_id) NULL
#endif
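A small sketch of the call-site pattern after the xe_cur_kunit() removal; the wrapper function is hypothetical, and kunit_get_current_test() is expected to return NULL when no test is running:

#include <kunit/test.h>
#include <kunit/test-bug.h>

static void example_note_if_under_kunit(void)
{
	struct kunit *test = kunit_get_current_test();

	/* NULL outside of a KUnit run, so existing NULL checks keep working */
	if (test)
		kunit_info(test, "running under KUnit\n");
}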
diff --git a/drivers/gpu/drm/xe/tests/xe_wa_test.c b/drivers/gpu/drm/xe/tests/xe_wa_test.c
index 9d0c715142b9..c96d1fe34151 100644
--- a/drivers/gpu/drm/xe/tests/xe_wa_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_wa_test.c
@@ -74,6 +74,7 @@ static const struct platform_test_case cases[] = {
GMDID_CASE(METEORLAKE, 1274, A0, 1300, A0),
GMDID_CASE(LUNARLAKE, 2004, A0, 2000, A0),
GMDID_CASE(LUNARLAKE, 2004, B0, 2000, A0),
+ GMDID_CASE(BATTLEMAGE, 2001, A0, 1301, A1),
};
static void platform_desc(const struct platform_test_case *t, char *desc)
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 31192d983d9e..3295bc92d7aa 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -1264,13 +1264,14 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
if (flags & (XE_BO_FLAG_VRAM_MASK | XE_BO_FLAG_STOLEN) &&
!(flags & XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE) &&
((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) ||
- (flags & XE_BO_NEEDS_64K))) {
- aligned_size = ALIGN(size, SZ_64K);
+ (flags & (XE_BO_FLAG_NEEDS_64K | XE_BO_FLAG_NEEDS_2M)))) {
+ size_t align = flags & XE_BO_FLAG_NEEDS_2M ? SZ_2M : SZ_64K;
+
+ aligned_size = ALIGN(size, align);
if (type != ttm_bo_type_device)
- size = ALIGN(size, SZ_64K);
+ size = ALIGN(size, align);
flags |= XE_BO_FLAG_INTERNAL_64K;
- alignment = SZ_64K >> PAGE_SHIFT;
-
+ alignment = align >> PAGE_SHIFT;
} else {
aligned_size = ALIGN(size, SZ_4K);
flags &= ~XE_BO_FLAG_INTERNAL_64K;
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 6de894c728f5..1c9dc8adaaa3 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -36,8 +36,9 @@
#define XE_BO_FLAG_PAGETABLE BIT(12)
#define XE_BO_FLAG_NEEDS_CPU_ACCESS BIT(13)
#define XE_BO_FLAG_NEEDS_UC BIT(14)
-#define XE_BO_NEEDS_64K BIT(15)
-#define XE_BO_FLAG_GGTT_INVALIDATE BIT(16)
+#define XE_BO_FLAG_NEEDS_64K BIT(15)
+#define XE_BO_FLAG_NEEDS_2M BIT(16)
+#define XE_BO_FLAG_GGTT_INVALIDATE BIT(17)
/* this one is trigger internally only */
#define XE_BO_FLAG_INTERNAL_TEST BIT(30)
#define XE_BO_FLAG_INTERNAL_64K BIT(31)
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 10450f1fbbde..ebc8abf7930a 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -58,6 +58,8 @@ struct xe_bo {
#endif
/** @freed: List node for delayed put. */
struct llist_node freed;
+ /** @update_index: Update index if PT BO */
+ int update_index;
/** @created: Whether the bo has passed initial creation */
bool created;
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index 62c2b10fbf1d..d8d8ca2c19d3 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -171,7 +171,6 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
u32 adj_logical_mask = q->logical_mask;
u32 width_mask = (0x1 << q->width) - 1;
const char *process_name = "no process";
- struct task_struct *task = NULL;
int i;
bool cookie;
@@ -179,14 +178,9 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
ss->snapshot_time = ktime_get_real();
ss->boot_time = ktime_get_boottime();
- if (q->vm && q->vm->xef) {
- task = get_pid_task(q->vm->xef->drm->pid, PIDTYPE_PID);
- if (task)
- process_name = task->comm;
- }
+ if (q->vm && q->vm->xef)
+ process_name = q->vm->xef->process_name;
strscpy(ss->process_name, process_name);
- if (task)
- put_task_struct(task);
ss->gt = q->gt;
INIT_WORK(&ss->work, xe_devcoredump_deferred_snap_work);
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 76109415eba6..1aba6f9eaa19 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -54,6 +54,9 @@
#include "xe_vm.h"
#include "xe_vram.h"
#include "xe_wait_user_fence.h"
+#include "xe_wa.h"
+
+#include <generated/xe_wa_oob.h>
static int xe_file_open(struct drm_device *dev, struct drm_file *file)
{
@@ -61,6 +64,7 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
struct xe_drm_client *client;
struct xe_file *xef;
int ret = -ENOMEM;
+ struct task_struct *task = NULL;
xef = kzalloc(sizeof(*xef), GFP_KERNEL);
if (!xef)
@@ -87,9 +91,63 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
spin_unlock(&xe->clients.lock);
file->driver_priv = xef;
+ kref_init(&xef->refcount);
+
+ task = get_pid_task(rcu_access_pointer(file->pid), PIDTYPE_PID);
+ if (task) {
+ xef->process_name = kstrdup(task->comm, GFP_KERNEL);
+ xef->pid = task->pid;
+ put_task_struct(task);
+ }
+
return 0;
}
+static void xe_file_destroy(struct kref *ref)
+{
+ struct xe_file *xef = container_of(ref, struct xe_file, refcount);
+ struct xe_device *xe = xef->xe;
+
+ xa_destroy(&xef->exec_queue.xa);
+ mutex_destroy(&xef->exec_queue.lock);
+ xa_destroy(&xef->vm.xa);
+ mutex_destroy(&xef->vm.lock);
+
+ spin_lock(&xe->clients.lock);
+ xe->clients.count--;
+ spin_unlock(&xe->clients.lock);
+
+ xe_drm_client_put(xef->client);
+ kfree(xef->process_name);
+ kfree(xef);
+}
+
+/**
+ * xe_file_get() - Take a reference to the xe file object
+ * @xef: Pointer to the xe file
+ *
+ * Anyone with a pointer to xef must take a reference to the xe file
+ * object using this call.
+ *
+ * Return: xe file pointer
+ */
+struct xe_file *xe_file_get(struct xe_file *xef)
+{
+ kref_get(&xef->refcount);
+ return xef;
+}
+
+/**
+ * xe_file_put() - Drop a reference to the xe file object
+ * @xef: Pointer to the xe file
+ *
+ * Used to drop a reference to the xef object.
+ */
+void xe_file_put(struct xe_file *xef)
+{
+ kref_put(&xef->refcount, xe_file_destroy);
+}
+
static void xe_file_close(struct drm_device *dev, struct drm_file *file)
{
struct xe_device *xe = to_xe_device(dev);
@@ -98,6 +156,8 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
struct xe_exec_queue *q;
unsigned long idx;
+ xe_pm_runtime_get(xe);
+
/*
* No need for exec_queue.lock here as there is no contention for it
* when FD is closing as IOCTLs presumably can't be modifying the
@@ -108,21 +168,14 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
xe_exec_queue_kill(q);
xe_exec_queue_put(q);
}
- xa_destroy(&xef->exec_queue.xa);
- mutex_destroy(&xef->exec_queue.lock);
mutex_lock(&xef->vm.lock);
xa_for_each(&xef->vm.xa, idx, vm)
xe_vm_close_and_put(vm);
mutex_unlock(&xef->vm.lock);
- xa_destroy(&xef->vm.xa);
- mutex_destroy(&xef->vm.lock);
- spin_lock(&xe->clients.lock);
- xe->clients.count--;
- spin_unlock(&xe->clients.lock);
+ xe_file_put(xef);
- xe_drm_client_put(xef->client);
- kfree(xef);
+ xe_pm_runtime_put(xe);
}
static const struct drm_ioctl_desc xe_ioctls[] = {
@@ -744,13 +797,22 @@ void xe_device_shutdown(struct xe_device *xe)
{
}
+/**
+ * xe_device_wmb() - Device specific write memory barrier
+ * @xe: the &xe_device
+ *
+ * While wmb() is sufficient for a barrier if we use system memory, on discrete
+ * platforms with device memory we additionally need to issue a register write.
+ * Since it doesn't matter which register we write to, use the read-only VF_CAP
+ * register that is also marked as accessible by the VFs.
+ */
void xe_device_wmb(struct xe_device *xe)
{
struct xe_gt *gt = xe_root_mmio_gt(xe);
wmb();
if (IS_DGFX(xe))
- xe_mmio_write32(gt, SOFTWARE_FLAGS_SPR33, 0);
+ xe_mmio_write32(gt, VF_CAP_REG, 0);
}
/**
@@ -779,6 +841,11 @@ void xe_device_td_flush(struct xe_device *xe)
if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20)
return;
+ if (XE_WA(xe_root_mmio_gt(xe), 16023588340)) {
+ xe_device_l2_flush(xe);
+ return;
+ }
+
for_each_gt(gt, xe, id) {
if (xe_gt_is_media_type(gt))
continue;
@@ -802,6 +869,30 @@ void xe_device_td_flush(struct xe_device *xe)
}
}
+void xe_device_l2_flush(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ int err;
+
+ gt = xe_root_mmio_gt(xe);
+
+ if (!XE_WA(gt, 16023588340))
+ return;
+
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (err)
+ return;
+
+ spin_lock(&gt->global_invl_lock);
+ xe_mmio_write32(gt, XE2_GLOBAL_INVAL, 0x1);
+
+ if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 150, NULL, true))
+ xe_gt_err_once(gt, "Global invalidation timeout\n");
+ spin_unlock(&gt->global_invl_lock);
+
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+}
+
u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
{
return xe_device_has_flat_ccs(xe) ?
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index bb07f5669dbb..db6cc8d0d6b8 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -20,6 +20,11 @@ static inline struct xe_device *pdev_to_xe_device(struct pci_dev *pdev)
return pci_get_drvdata(pdev);
}
+static inline struct xe_device *xe_device_const_cast(const struct xe_device *xe)
+{
+ return (struct xe_device *)xe;
+}
+
static inline struct xe_device *ttm_to_xe_device(struct ttm_device *ttm)
{
return container_of(ttm, struct xe_device, ttm);
@@ -162,6 +167,7 @@ u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address);
u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address);
void xe_device_td_flush(struct xe_device *xe);
+void xe_device_l2_flush(struct xe_device *xe);
static inline bool xe_device_wedged(struct xe_device *xe)
{
@@ -170,4 +176,7 @@ static inline bool xe_device_wedged(struct xe_device *xe)
void xe_device_declare_wedged(struct xe_device *xe);
+struct xe_file *xe_file_get(struct xe_file *xef);
+void xe_file_put(struct xe_file *xef);
+
#endif
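A hedged sketch of how a holder of an xe_file pointer is expected to use the new refcounting API; the helper functions here are hypothetical, while the exec-queue hunks later in this series show the in-tree user:

#include "xe_device.h"
#include "xe_device_types.h"

/* illustrative only: keep xef alive beyond the file-close path */
static struct xe_file *example_stash_xef(struct xe_file *xef)
{
	return xe_file_get(xef);	/* takes an extra reference */
}

static void example_release_xef(struct xe_file *xef)
{
	xe_file_put(xef);		/* last put ends up in xe_file_destroy() */
}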
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 3bca6d344744..5b7292a9a66d 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -23,6 +23,10 @@
#include "xe_sriov_types.h"
#include "xe_step_types.h"
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+#define TEST_VM_OPS_ERROR
+#endif
+
#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
#include "soc/intel_pch.h"
#include "intel_display_core.h"
@@ -40,6 +44,7 @@ struct xe_pat_ops;
#define MEDIA_VERx100(xe) ((xe)->info.media_verx100)
#define IS_DGFX(xe) ((xe)->info.is_dgfx)
#define HAS_HECI_GSCFI(xe) ((xe)->info.has_heci_gscfi)
+#define HAS_HECI_CSCFI(xe) ((xe)->info.has_heci_cscfi)
#define XE_VRAM_FLAGS_NEED64K BIT(0)
@@ -285,6 +290,8 @@ struct xe_device {
u8 skip_pcode:1;
/** @info.has_heci_gscfi: device has heci gscfi */
u8 has_heci_gscfi:1;
+ /** @info.has_heci_cscfi: device has heci cscfi */
+ u8 has_heci_cscfi:1;
/** @info.skip_guc_pc: Skip GuC based PM feature init */
u8 skip_guc_pc:1;
/** @info.has_atomic_enable_pte_bit: Device has atomic enable PTE bit */
@@ -477,6 +484,14 @@ struct xe_device {
int mode;
} wedged;
+#ifdef TEST_VM_OPS_ERROR
+ /**
+ * @vm_inject_error_position: inject errors at different places in VM
+ * bind IOCTL based on this value
+ */
+ u8 vm_inject_error_position;
+#endif
+
/* private: */
#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
@@ -566,6 +581,21 @@ struct xe_file {
/** @client: drm client */
struct xe_drm_client *client;
+
+ /**
+ * @process_name: process name for file handle, used to safely output
+ * during error situations where xe file can outlive process
+ */
+ char *process_name;
+
+ /**
+	 * @pid: pid for file handle, used to safely output during error
+ * situations where xe file can outlive process
+ */
+ pid_t pid;
+
+ /** @refcount: ref count of this xe file */
+ struct kref refcount;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index 6a26923fa10e..7ddd59908334 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -251,11 +251,8 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
/* Accumulate all the exec queues from this client */
mutex_lock(&xef->exec_queue.lock);
- xa_for_each(&xef->exec_queue.xa, i, q) {
+ xa_for_each(&xef->exec_queue.xa, i, q)
xe_exec_queue_update_run_ticks(q);
- xef->run_ticks[q->class] += q->run_ticks - q->old_run_ticks;
- q->old_run_ticks = q->run_ticks;
- }
mutex_unlock(&xef->exec_queue.lock);
/* Get the total GPU cycles */
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 0ba37835849b..69867a7b7c77 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -37,6 +37,10 @@ static void __xe_exec_queue_free(struct xe_exec_queue *q)
{
if (q->vm)
xe_vm_put(q->vm);
+
+ if (q->xef)
+ xe_file_put(q->xef);
+
kfree(q);
}
@@ -649,6 +653,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
goto kill_exec_queue;
args->exec_queue_id = id;
+ q->xef = xe_file_get(xef);
return 0;
@@ -762,6 +767,7 @@ bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
*/
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
+ struct xe_file *xef;
struct xe_lrc *lrc;
u32 old_ts, new_ts;
@@ -773,6 +779,8 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
if (!q->vm || !q->vm->xef)
return;
+ xef = q->vm->xef;
+
/*
* Only sample the first LRC. For parallel submission, all of them are
* scheduled together and we compensate that below by multiplying by
@@ -783,7 +791,7 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
*/
lrc = q->lrc[0];
new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
- q->run_ticks += (new_ts - old_ts) * q->width;
+ xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
}
void xe_exec_queue_kill(struct xe_exec_queue *q)
@@ -906,3 +914,26 @@ void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
xe_exec_queue_last_fence_put(q, vm);
q->last_fence = dma_fence_get(fence);
}
+
+/**
+ * xe_exec_queue_last_fence_test_dep - Test last fence dependency of queue
+ * @q: The exec queue
+ * @vm: The VM the engine does a bind or exec for
+ *
+ * Returns:
+ * -ETIME if there exists an unsignalled last fence dependency, zero otherwise.
+ */
+int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q, struct xe_vm *vm)
+{
+ struct dma_fence *fence;
+ int err = 0;
+
+ fence = xe_exec_queue_last_fence_get(q, vm);
+ if (fence) {
+ err = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) ?
+ 0 : -ETIME;
+ dma_fence_put(fence);
+ }
+
+ return err;
+}
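A minimal caller-side sketch of the new dependency check; the wrapper is hypothetical, and a non-blocking bind path would use it to fail fast rather than wait in the dma-fencing path:

#include "xe_exec_queue.h"

static int example_check_queue_ready(struct xe_exec_queue *q, struct xe_vm *vm)
{
	int err = xe_exec_queue_last_fence_test_dep(q, vm);

	/* -ETIME means the queue's last fence is still unsignalled */
	if (err == -ETIME)
		return err;

	return 0;
}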
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
index 289a3a51d2a2..ded77b0f3b90 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue.h
@@ -75,6 +75,8 @@ struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *e,
struct xe_vm *vm);
void xe_exec_queue_last_fence_set(struct xe_exec_queue *e, struct xe_vm *vm,
struct dma_fence *fence);
+int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q,
+ struct xe_vm *vm);
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q);
#endif
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 201588ec33c3..1408b02eea53 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -38,6 +38,9 @@ enum xe_exec_queue_priority {
* a kernel object.
*/
struct xe_exec_queue {
+ /** @xef: Back pointer to xe file if this is user created exec queue */
+ struct xe_file *xef;
+
/** @gt: graphics tile this exec queue can submit to */
struct xe_gt *gt;
/**
@@ -139,10 +142,6 @@ struct xe_exec_queue {
* Protected by @vm's resv. Unused if @vm == NULL.
*/
u64 tlb_flush_seqno;
- /** @old_run_ticks: prior hw engine class run time in ticks for this exec queue */
- u64 old_run_ticks;
- /** @run_ticks: hw engine class run time in ticks for this exec queue */
- u64 run_ticks;
/** @lrc: logical ring context for this exec queue */
struct xe_lrc *lrc[];
};
@@ -172,9 +171,11 @@ struct xe_exec_queue_ops {
int (*suspend)(struct xe_exec_queue *q);
/**
* @suspend_wait: Wait for an exec queue to suspend executing, should be
- * call after suspend.
+ * call after suspend. In dma-fencing path thus must return within a
+	 * called after suspend. In the dma-fencing path this must return within
+	 * a reasonable amount of time. An -ETIME return indicates an error
+	 * waiting for suspend, resulting in the associated VM getting killed.
- void (*suspend_wait)(struct xe_exec_queue *q);
+ int (*suspend_wait)(struct xe_exec_queue *q);
/**
* @resume: Resume exec queue execution, exec queue must be in a suspended
* state and dma fence returned from most recent suspend call must be
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index db906117db6d..7502e3486eaf 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -422,10 +422,11 @@ static int execlist_exec_queue_suspend(struct xe_exec_queue *q)
return 0;
}
-static void execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
+static int execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
{
/* NIY */
+ return 0;
}
static void execlist_exec_queue_resume(struct xe_exec_queue *q)
diff --git a/drivers/gpu/drm/xe/xe_gen_wa_oob.c b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
index 106ee2b027f0..904cf47925aa 100644
--- a/drivers/gpu/drm/xe/xe_gen_wa_oob.c
+++ b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
@@ -97,19 +97,27 @@ static int parse(FILE *input, FILE *csource, FILE *cheader)
if (name) {
fprintf(cheader, "\tXE_WA_OOB_%s = %u,\n", name, idx);
- fprintf(csource, "{ XE_RTP_NAME(\"%s\"), XE_RTP_RULES(%s) },\n",
+
+ /* Close previous entry before starting a new one */
+ if (idx)
+ fprintf(csource, ") },\n");
+
+ fprintf(csource, "{ XE_RTP_NAME(\"%s\"),\n XE_RTP_RULES(%s",
name, rules);
+ idx++;
} else {
- fprintf(csource, "{ XE_RTP_NAME(NULL), XE_RTP_RULES(%s) },\n",
- rules);
+ fprintf(csource, ", OR,\n\t%s", rules);
}
- idx++;
lineno++;
if (!is_continuation)
prev_name = name;
}
+ /* Close last entry */
+ if (idx)
+ fprintf(csource, ") },\n");
+
fprintf(cheader, "\t_XE_WA_OOB_COUNT = %u\n", idx);
return 0;
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 31b2e64c70c6..58895ed22f6e 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -9,6 +9,7 @@
#include <drm/drm_managed.h>
#include <drm/xe_drm.h>
+
#include <generated/xe_wa_oob.h>
#include "instructions/xe_gfxpipe_commands.h"
@@ -95,6 +96,51 @@ void xe_gt_sanitize(struct xe_gt *gt)
gt->uc.guc.submission_state.enabled = false;
}
+static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
+{
+ u32 reg;
+ int err;
+
+ if (!XE_WA(gt, 16023588340))
+ return;
+
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (WARN_ON(err))
+ return;
+
+ if (!xe_gt_is_media_type(gt)) {
+ xe_mmio_write32(gt, SCRATCH1LPFC, EN_L3_RW_CCS_CACHE_FLUSH);
+ reg = xe_mmio_read32(gt, XE2_GAMREQSTRM_CTRL);
+ reg |= CG_DIS_CNTLBUS;
+ xe_mmio_write32(gt, XE2_GAMREQSTRM_CTRL, reg);
+ }
+
+ xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0x3);
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+}
+
+static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
+{
+ u32 reg;
+ int err;
+
+ if (!XE_WA(gt, 16023588340))
+ return;
+
+ if (xe_gt_is_media_type(gt))
+ return;
+
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (WARN_ON(err))
+ return;
+
+ reg = xe_mmio_read32(gt, XE2_GAMREQSTRM_CTRL);
+ reg &= ~CG_DIS_CNTLBUS;
+ xe_mmio_write32(gt, XE2_GAMREQSTRM_CTRL, reg);
+
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+}
+
/**
* xe_gt_remove() - Clean up the GT structures before driver removal
* @gt: the GT object
@@ -111,6 +157,8 @@ void xe_gt_remove(struct xe_gt *gt)
for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
xe_hw_fence_irq_finish(&gt->fence_irq[i]);
+
+ xe_gt_disable_host_l2_vram(gt);
}
static void gt_reset_worker(struct work_struct *w);
@@ -339,6 +387,7 @@ int xe_gt_init_early(struct xe_gt *gt)
xe_force_wake_init_gt(gt, gt_to_fw(gt));
xe_pcode_init(gt);
+ spin_lock_init(&gt->global_invl_lock);
return 0;
}
@@ -508,6 +557,7 @@ int xe_gt_init_hwconfig(struct xe_gt *gt)
xe_gt_mcr_init_early(gt);
xe_pat_init(gt);
+ xe_gt_enable_host_l2_vram(gt);
err = xe_uc_init(&gt->uc);
if (err)
@@ -643,6 +693,8 @@ static int do_gt_restart(struct xe_gt *gt)
xe_pat_init(gt);
+ xe_gt_enable_host_l2_vram(gt);
+
xe_gt_mcr_set_implicit_defaults(gt);
xe_reg_sr_apply_mmio(&gt->reg_sr, gt);
@@ -796,6 +848,8 @@ int xe_gt_suspend(struct xe_gt *gt)
xe_gt_idle_disable_pg(gt);
+ xe_gt_disable_host_l2_vram(gt);
+
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
xe_gt_dbg(gt, "suspended\n");
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
index 9dbba9ab7a9a..ef239440963c 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
@@ -5,7 +5,7 @@
#include <drm/drm_managed.h>
-#include "regs/xe_sriov_regs.h"
+#include "regs/xe_regs.h"
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_pf_config.h"
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 4699b7836001..52c7277d243d 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -1401,6 +1401,7 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
ALIGN(size, PAGE_SIZE),
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_NEEDS_2M |
XE_BO_FLAG_PINNED);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 41e46a00c01e..47222bd9988d 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -850,7 +850,7 @@ static struct vf_runtime_reg *vf_lookup_reg(struct xe_gt *gt, u32 addr)
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
- return bsearch(&key, runtime->regs, runtime->regs_size, sizeof(key),
+ return bsearch(&key, runtime->regs, runtime->num_regs, sizeof(key),
vf_runtime_reg_cmp);
}
@@ -893,6 +893,32 @@ u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg)
}
/**
+ * xe_gt_sriov_vf_write32 - Handle a write to an inaccessible register.
+ * @gt: the &xe_gt
+ * @reg: the register to write
+ * @val: value to write
+ *
+ * This function is for VF use only.
+ * Currently it will trigger a WARN if running on a debug build.
+ */
+void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
+{
+ u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ xe_gt_assert(gt, !reg.vf);
+
+ /*
+ * In the future, we may want to handle selected writes to inaccessible
+ * registers in some custom way, but for now let's just log a warning
+	 * about such an attempt, as we are likely doing something wrong.
+ */
+ xe_gt_WARN(gt, IS_ENABLED(CONFIG_DRM_XE_DEBUG),
+ "VF is trying to write %#x to an inaccessible register %#x+%#x\n",
+ val, reg.addr, addr - reg.addr);
+}
+
+/**
* xe_gt_sriov_vf_print_config - Print VF self config.
* @gt: the &xe_gt
* @p: the &drm_printer
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
index 0de7f8cbcfa6..e541ce57bec2 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
@@ -22,6 +22,7 @@ u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt);
u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt);
u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt);
u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg);
+void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val);
void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p);
void xe_gt_sriov_vf_print_runtime(struct xe_gt *gt, struct drm_printer *p);
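The xe_gt_sriov_vf_write32() helper added above only warns for now; the MMIO write path that would route VF writes into it is outside this hunk. A minimal caller-side sketch, where example_vf_aware_write32() is a purely hypothetical wrapper that diverts writes to registers lacking the .vf annotation:

static void example_vf_aware_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
{
	/* VF-inaccessible register: log instead of touching the hardware */
	if (IS_SRIOV_VF(gt_to_xe(gt)) && !reg.vf)
		xe_gt_sriov_vf_write32(gt, reg, val);
	else
		xe_mmio_write32(gt, reg, val);
}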
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index d9359976ab8b..87cb76a8718c 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -13,10 +13,13 @@
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_mmio.h"
+#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_trace.h"
#include "regs/xe_guc_regs.h"
+#define FENCE_STACK_BIT DMA_FENCE_FLAG_USER_BITS
+
/*
* TLB inval depends on pending commands in the CT queue and then the real
* invalidation time. Double up the time to process full CT queue
@@ -33,6 +36,24 @@ static long tlb_timeout_jiffies(struct xe_gt *gt)
return hw_tlb_timeout + 2 * delay;
}
+static void
+__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
+{
+ bool stack = test_bit(FENCE_STACK_BIT, &fence->base.flags);
+
+ trace_xe_gt_tlb_invalidation_fence_signal(xe, fence);
+ xe_gt_tlb_invalidation_fence_fini(fence);
+ dma_fence_signal(&fence->base);
+ if (!stack)
+ dma_fence_put(&fence->base);
+}
+
+static void
+invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
+{
+ list_del(&fence->link);
+ __invalidation_fence_signal(xe, fence);
+}
static void xe_gt_tlb_fence_timeout(struct work_struct *work)
{
@@ -54,10 +75,8 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
xe_gt_err(gt, "TLB invalidation fence timeout, seqno=%d recv=%d",
fence->seqno, gt->tlb_invalidation.seqno_recv);
- list_del(&fence->link);
fence->base.error = -ETIME;
- dma_fence_signal(&fence->base);
- dma_fence_put(&fence->base);
+ invalidation_fence_signal(xe, fence);
}
if (!list_empty(&gt->tlb_invalidation.pending_fences))
queue_delayed_work(system_wq,
@@ -87,21 +106,6 @@ int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
return 0;
}
-static void
-__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
-{
- trace_xe_gt_tlb_invalidation_fence_signal(xe, fence);
- dma_fence_signal(&fence->base);
- dma_fence_put(&fence->base);
-}
-
-static void
-invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
-{
- list_del(&fence->link);
- __invalidation_fence_signal(xe, fence);
-}
-
/**
* xe_gt_tlb_invalidation_reset - Initialize GT TLB invalidation reset
* @gt: graphics tile
@@ -111,7 +115,6 @@ invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fe
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
{
struct xe_gt_tlb_invalidation_fence *fence, *next;
- struct xe_guc *guc = &gt->uc.guc;
int pending_seqno;
/*
@@ -134,7 +137,6 @@ void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
else
pending_seqno = gt->tlb_invalidation.seqno - 1;
WRITE_ONCE(gt->tlb_invalidation.seqno_recv, pending_seqno);
- wake_up_all(&guc->ct.wq);
list_for_each_entry_safe(fence, next,
&gt->tlb_invalidation.pending_fences, link)
@@ -165,6 +167,8 @@ static int send_tlb_invalidation(struct xe_guc *guc,
int seqno;
int ret;
+ xe_gt_assert(gt, fence);
+
/*
* XXX: The seqno algorithm relies on TLB invalidation being processed
* in order which they currently are, if that changes the algorithm will
@@ -173,14 +177,12 @@ static int send_tlb_invalidation(struct xe_guc *guc,
mutex_lock(&guc->ct.lock);
seqno = gt->tlb_invalidation.seqno;
- if (fence) {
- fence->seqno = seqno;
- trace_xe_gt_tlb_invalidation_fence_send(xe, fence);
- }
+ fence->seqno = seqno;
+ trace_xe_gt_tlb_invalidation_fence_send(xe, fence);
action[1] = seqno;
ret = xe_guc_ct_send_locked(&guc->ct, action, len,
G2H_LEN_DW_TLB_INVALIDATE, 1);
- if (!ret && fence) {
+ if (!ret) {
spin_lock_irq(&gt->tlb_invalidation.pending_lock);
/*
* We haven't actually published the TLB fence as per
@@ -201,7 +203,7 @@ static int send_tlb_invalidation(struct xe_guc *guc,
tlb_timeout_jiffies(gt));
}
spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
- } else if (ret < 0 && fence) {
+ } else if (ret < 0) {
__invalidation_fence_signal(xe, fence);
}
if (!ret) {
@@ -209,7 +211,6 @@ static int send_tlb_invalidation(struct xe_guc *guc,
TLB_INVALIDATION_SEQNO_MAX;
if (!gt->tlb_invalidation.seqno)
gt->tlb_invalidation.seqno = 1;
- ret = seqno;
}
mutex_unlock(&guc->ct.lock);
@@ -223,14 +224,16 @@ static int send_tlb_invalidation(struct xe_guc *guc,
/**
* xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
* @gt: graphics tile
+ * @fence: invalidation fence which will be signaled on TLB invalidation
+ * completion
*
* Issue a TLB invalidation for the GuC. Completion of TLB is asynchronous and
- * caller can use seqno + xe_gt_tlb_invalidation_wait to wait for completion.
+ * caller can use the invalidation fence to wait for completion.
*
- * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
- * negative error code on error.
+ * Return: 0 on success, negative error code on error
*/
-static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt)
+static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence)
{
u32 action[] = {
XE_GUC_ACTION_TLB_INVALIDATION,
@@ -238,7 +241,7 @@ static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt)
MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
};
- return send_tlb_invalidation(&gt->uc.guc, NULL, action,
+ return send_tlb_invalidation(&gt->uc.guc, fence, action,
ARRAY_SIZE(action));
}
@@ -257,13 +260,17 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
if (xe_guc_ct_enabled(&gt->uc.guc.ct) &&
gt->uc.guc.submission_state.enabled) {
- int seqno;
-
- seqno = xe_gt_tlb_invalidation_guc(gt);
- if (seqno <= 0)
- return seqno;
+ struct xe_gt_tlb_invalidation_fence fence;
+ int ret;
+
+ xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
+ ret = xe_gt_tlb_invalidation_guc(gt, &fence);
+ if (ret < 0) {
+ xe_gt_tlb_invalidation_fence_fini(&fence);
+ return ret;
+ }
- xe_gt_tlb_invalidation_wait(gt, seqno);
+ xe_gt_tlb_invalidation_fence_wait(&fence);
} else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
if (IS_SRIOV_VF(xe))
return 0;
@@ -290,18 +297,16 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
*
* @gt: graphics tile
* @fence: invalidation fence which will be signal on TLB invalidation
- * completion, can be NULL
+ * completion
* @start: start address
* @end: end address
* @asid: address space id
*
* Issue a range based TLB invalidation if supported, if not fallback to a full
- * TLB invalidation. Completion of TLB is asynchronous and caller can either use
- * the invalidation fence or seqno + xe_gt_tlb_invalidation_wait to wait for
- * completion.
+ * TLB invalidation. Completion of TLB is asynchronous and caller can use
+ * the invalidation fence to wait for completion.
*
- * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
- * negative error code on error.
+ * Return: Negative error code on error, 0 on success
*/
int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
@@ -312,11 +317,11 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
u32 action[MAX_TLB_INVALIDATION_LEN];
int len = 0;
+ xe_gt_assert(gt, fence);
+
/* Execlists not supported */
if (gt_to_xe(gt)->info.force_execlist) {
- if (fence)
- __invalidation_fence_signal(xe, fence);
-
+ __invalidation_fence_signal(xe, fence);
return 0;
}
@@ -382,12 +387,10 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
* @vma: VMA to invalidate
*
* Issue a range based TLB invalidation if supported, if not fallback to a full
- * TLB invalidation. Completion of TLB is asynchronous and caller can either use
- * the invalidation fence or seqno + xe_gt_tlb_invalidation_wait to wait for
- * completion.
+ * TLB invalidation. Completion of TLB is asynchronous and caller can use
+ * the invalidation fence to wait for completion.
*
- * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
- * negative error code on error.
+ * Return: Negative error code on error, 0 on success
*/
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
@@ -401,43 +404,6 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
}
/**
- * xe_gt_tlb_invalidation_wait - Wait for TLB to complete
- * @gt: graphics tile
- * @seqno: seqno to wait which was returned from xe_gt_tlb_invalidation
- *
- * Wait for tlb_timeout_jiffies() for a TLB invalidation to complete.
- *
- * Return: 0 on success, -ETIME on TLB invalidation timeout
- */
-int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
-{
- struct xe_guc *guc = &gt->uc.guc;
- int ret;
-
- /* Execlists not supported */
- if (gt_to_xe(gt)->info.force_execlist)
- return 0;
-
- /*
- * XXX: See above, this algorithm only works if seqno are always in
- * order
- */
- ret = wait_event_timeout(guc->ct.wq,
- tlb_invalidation_seqno_past(gt, seqno),
- tlb_timeout_jiffies(gt));
- if (!ret) {
- struct drm_printer p = xe_gt_err_printer(gt);
-
- xe_gt_err(gt, "TLB invalidation time'd out, seqno=%d, recv=%d\n",
- seqno, gt->tlb_invalidation.seqno_recv);
- xe_guc_ct_print(&guc->ct, &p, true);
- return -ETIME;
- }
-
- return 0;
-}
-
-/**
* xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
* @guc: guc
* @msg: message indicating TLB invalidation done
@@ -480,12 +446,7 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
return 0;
}
- /*
- * wake_up_all() and wait_event_timeout() already have the correct
- * barriers.
- */
WRITE_ONCE(gt->tlb_invalidation.seqno_recv, msg[0]);
- wake_up_all(&guc->ct.wq);
list_for_each_entry_safe(fence, next,
&gt->tlb_invalidation.pending_fences, link) {
@@ -508,3 +469,59 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
return 0;
}
+
+static const char *
+invalidation_fence_get_driver_name(struct dma_fence *dma_fence)
+{
+ return "xe";
+}
+
+static const char *
+invalidation_fence_get_timeline_name(struct dma_fence *dma_fence)
+{
+ return "invalidation_fence";
+}
+
+static const struct dma_fence_ops invalidation_fence_ops = {
+ .get_driver_name = invalidation_fence_get_driver_name,
+ .get_timeline_name = invalidation_fence_get_timeline_name,
+};
+
+/**
+ * xe_gt_tlb_invalidation_fence_init - Initialize TLB invalidation fence
+ * @gt: GT
+ * @fence: TLB invalidation fence to initialize
+ * @stack: fence is a stack variable
+ *
+ * Initialize TLB invalidation fence for use. xe_gt_tlb_invalidation_fence_fini
+ * must be called if fence is not signaled.
+ */
+void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence,
+ bool stack)
+{
+ xe_pm_runtime_get_noresume(gt_to_xe(gt));
+
+ spin_lock_irq(&gt->tlb_invalidation.lock);
+ dma_fence_init(&fence->base, &invalidation_fence_ops,
+ &gt->tlb_invalidation.lock,
+ dma_fence_context_alloc(1), 1);
+ spin_unlock_irq(&gt->tlb_invalidation.lock);
+ INIT_LIST_HEAD(&fence->link);
+ if (stack)
+ set_bit(FENCE_STACK_BIT, &fence->base.flags);
+ else
+ dma_fence_get(&fence->base);
+ fence->gt = gt;
+}
+
+/**
+ * xe_gt_tlb_invalidation_fence_fini - Finalize TLB invalidation fence
+ * @fence: TLB invalidation fence to finalize
+ *
+ * Drop the PM ref which the fence took during init.
+ */
+void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence)
+{
+ xe_pm_runtime_put(gt_to_xe(fence->gt));
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
index bf3bebd9f985..a84065fa324c 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -23,7 +23,17 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
u64 start, u64 end, u32 asid);
-int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno);
int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
+void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence,
+ bool stack);
+void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence);
+
+static inline void
+xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence)
+{
+ dma_fence_wait(&fence->base, false);
+}
+
#endif /* _XE_GT_TLB_INVALIDATION_ */
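A minimal usage sketch of the fence-based API declared above, mirroring the xe_gt_tlb_invalidation_ggtt() conversion earlier in this patch; the wrapper function and its parameters are illustrative only:

static int example_invalidate_range_and_wait(struct xe_gt *gt,
					     u64 start, u64 end, u32 asid)
{
	struct xe_gt_tlb_invalidation_fence fence;
	int ret;

	/* stack == true: no extra dma_fence reference is taken */
	xe_gt_tlb_invalidation_fence_init(gt, &fence, true);

	ret = xe_gt_tlb_invalidation_range(gt, &fence, start, end, asid);
	if (ret < 0) {
		/* drop the PM reference taken in _fence_init() */
		xe_gt_tlb_invalidation_fence_fini(&fence);
		return ret;
	}

	/* blocks via dma_fence_wait() until the done handler signals */
	xe_gt_tlb_invalidation_fence_wait(&fence);
	return 0;
}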
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h
index 934c828efe31..de6e825e0851 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h
@@ -8,6 +8,8 @@
#include <linux/dma-fence.h>
+struct xe_gt;
+
/**
* struct xe_gt_tlb_invalidation_fence - XE GT TLB invalidation fence
*
@@ -17,6 +19,8 @@
struct xe_gt_tlb_invalidation_fence {
/** @base: dma fence base */
struct dma_fence base;
+ /** @gt: GT which the fence belongs to */
+ struct xe_gt *gt;
/** @link: link into list of pending tlb fences */
struct list_head link;
/** @seqno: seqno of TLB invalidation to signal fence one */
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c
index 25ff03ab8448..0662f71c6ede 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.c
+++ b/drivers/gpu/drm/xe/xe_gt_topology.c
@@ -6,6 +6,7 @@
#include "xe_gt_topology.h"
#include <linux/bitmap.h>
+#include <linux/compiler.h>
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
@@ -31,7 +32,7 @@ load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...)
}
static void
-load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask)
+load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask, enum xe_gt_eu_type *eu_type)
{
struct xe_device *xe = gt_to_xe(gt);
u32 reg_val = xe_mmio_read32(gt, XELP_EU_ENABLE);
@@ -47,11 +48,13 @@ load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask)
if (GRAPHICS_VERx100(xe) < 1250)
reg_val = ~reg_val & XELP_EU_MASK;
- /* On PVC, one bit = one EU */
- if (GRAPHICS_VERx100(xe) == 1260) {
+ if (GRAPHICS_VERx100(xe) == 1260 || GRAPHICS_VER(xe) >= 20) {
+ /* SIMD16 EUs, one bit == one EU */
+ *eu_type = XE_GT_EU_TYPE_SIMD16;
val = reg_val;
} else {
- /* All other platforms, one bit = 2 EU */
+ /* SIMD8 EUs, one bit == 2 EU */
+ *eu_type = XE_GT_EU_TYPE_SIMD8;
for (i = 0; i < fls(reg_val); i++)
if (reg_val & BIT(i))
val |= 0x3 << 2 * i;
@@ -213,7 +216,7 @@ xe_gt_topology_init(struct xe_gt *gt)
XEHP_GT_COMPUTE_DSS_ENABLE,
XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,
XE2_GT_COMPUTE_DSS_2);
- load_eu_mask(gt, gt->fuse_topo.eu_mask_per_dss);
+ load_eu_mask(gt, gt->fuse_topo.eu_mask_per_dss, &gt->fuse_topo.eu_type);
load_l3_bank_mask(gt, gt->fuse_topo.l3_bank_mask);
p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, "GT topology");
@@ -221,6 +224,18 @@ xe_gt_topology_init(struct xe_gt *gt)
xe_gt_topology_dump(gt, &p);
}
+static const char *eu_type_to_str(enum xe_gt_eu_type eu_type)
+{
+ switch (eu_type) {
+ case XE_GT_EU_TYPE_SIMD16:
+ return "simd16";
+ case XE_GT_EU_TYPE_SIMD8:
+ return "simd8";
+ }
+
+ return NULL;
+}
+
void
xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p)
{
@@ -231,6 +246,8 @@ xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p)
drm_printf(p, "EU mask per DSS: %*pb\n", XE_MAX_EU_FUSE_BITS,
gt->fuse_topo.eu_mask_per_dss);
+ drm_printf(p, "EU type: %s\n",
+ eu_type_to_str(gt->fuse_topo.eu_type));
drm_printf(p, "L3 bank mask: %*pb\n", XE_MAX_L3_BANK_MASK_BITS,
gt->fuse_topo.l3_bank_mask);
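A small worked example of the EU-mask decoding above, using an illustrative fuse value of reg_val = 0b0101: on the SIMD16 path (one bit == one EU) the mask is taken as-is, so EUs 0 and 2 are present; on the SIMD8 path (one bit == 2 EUs) bit 0 expands to EUs 0-1 and bit 2 to EUs 4-5, giving an eu_mask_per_dss of 0b110011.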
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 6b5e0b45efb0..631928258d71 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -27,6 +27,11 @@ enum xe_gt_type {
XE_GT_TYPE_MEDIA,
};
+enum xe_gt_eu_type {
+ XE_GT_EU_TYPE_SIMD8,
+ XE_GT_EU_TYPE_SIMD16,
+};
+
#define XE_MAX_DSS_FUSE_REGS 3
#define XE_MAX_DSS_FUSE_BITS (32 * XE_MAX_DSS_FUSE_REGS)
#define XE_MAX_EU_FUSE_REGS 1
@@ -343,6 +348,12 @@ struct xe_gt {
/** @fuse_topo.l3_bank_mask: L3 bank mask */
xe_l3_bank_mask_t l3_bank_mask;
+
+ /**
+ * @fuse_topo.eu_type: type/width of EU stored in
+ * fuse_topo.eu_mask_per_dss
+ */
+ enum xe_gt_eu_type eu_type;
} fuse_topo;
/** @steering: register steering for individual HW units */
@@ -362,6 +373,12 @@ struct xe_gt {
*/
spinlock_t mcr_lock;
+ /**
+ * @global_invl_lock: protects the register for the duration
+ * of a global invalidation of l2 cache
+ */
+ spinlock_t global_invl_lock;
+
/** @wa_active: keep track of active workarounds */
struct {
/** @wa_active.gt: bitmap with active GT workarounds */
@@ -370,8 +387,14 @@ struct xe_gt {
unsigned long *engine;
/** @wa_active.lrc: bitmap with active LRC workarounds */
unsigned long *lrc;
- /** @wa_active.oob: bitmap with active OOB workaroudns */
+ /** @wa_active.oob: bitmap with active OOB workarounds */
unsigned long *oob;
+ /**
+ * @wa_active.oob_initialized: mark oob as initialized to help
+ * detect misuse of XE_WA() - it can only be called during
+ * initialization, after OOB WAs have been processed
+ */
+ bool oob_initialized;
} wa_active;
/** @user_engines: engines present in GT and available to userspace */
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 7d2e937da1d8..beeeb120d1fc 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -327,6 +327,8 @@ static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 ||
state == XE_GUC_CT_STATE_STOPPED);
+ if (ct->g2h_outstanding)
+ xe_pm_runtime_put(ct_to_xe(ct));
ct->g2h_outstanding = 0;
ct->state = state;
@@ -495,10 +497,15 @@ static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
{
xe_gt_assert(ct_to_gt(ct), g2h_len <= ct->ctbs.g2h.info.space);
+ xe_gt_assert(ct_to_gt(ct), (!g2h_len && !num_g2h) ||
+ (g2h_len && num_g2h));
if (g2h_len) {
lockdep_assert_held(&ct->fast_lock);
+ if (!ct->g2h_outstanding)
+ xe_pm_runtime_get_noresume(ct_to_xe(ct));
+
ct->ctbs.g2h.info.space -= g2h_len;
ct->g2h_outstanding += num_g2h;
}
@@ -509,9 +516,11 @@ static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
lockdep_assert_held(&ct->fast_lock);
xe_gt_assert(ct_to_gt(ct), ct->ctbs.g2h.info.space + g2h_len <=
ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space);
+ xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding);
ct->ctbs.g2h.info.space += g2h_len;
- --ct->g2h_outstanding;
+ if (!--ct->g2h_outstanding)
+ xe_pm_runtime_put(ct_to_xe(ct));
}
static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
diff --git a/drivers/gpu/drm/xe/xe_guc_id_mgr.c b/drivers/gpu/drm/xe/xe_guc_id_mgr.c
index cd0549d0ef89..e845425d670b 100644
--- a/drivers/gpu/drm/xe/xe_guc_id_mgr.c
+++ b/drivers/gpu/drm/xe/xe_guc_id_mgr.c
@@ -97,8 +97,8 @@ int xe_guc_id_mgr_init(struct xe_guc_id_mgr *idm, unsigned int limit)
if (ret)
return ret;
- xe_gt_info(idm_to_gt(idm), "using %u GUC ID%s\n",
- idm->total, str_plural(idm->total));
+ xe_gt_dbg(idm_to_gt(idm), "using %u GuC ID%s\n",
+ idm->total, str_plural(idm->total));
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 8d7e7f4bbff7..460808507947 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1071,7 +1071,9 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
struct xe_exec_queue *q = job->q;
struct xe_gpu_scheduler *sched = &q->guc->sched;
struct xe_guc *guc = exec_queue_to_guc(q);
+ const char *process_name = "no process";
int err = -ETIME;
+ pid_t pid = -1;
int i = 0;
bool wedged, skip_timeout_check;
@@ -1168,9 +1170,14 @@ trigger_reset:
goto sched_enable;
}
- xe_gt_notice(guc_to_gt(guc), "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx",
+ if (q->vm && q->vm->xef) {
+ process_name = q->vm->xef->process_name;
+ pid = q->vm->xef->pid;
+ }
+ xe_gt_notice(guc_to_gt(guc), "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx in %s [%d]",
xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
- q->guc->id, q->flags);
+ q->guc->id, q->flags, process_name, pid);
+
trace_xe_sched_job_timedout(job);
if (!exec_queue_killed(q))
@@ -1312,6 +1319,15 @@ static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *ms
kfree(msg);
}
+static void __suspend_fence_signal(struct xe_exec_queue *q)
+{
+ if (!q->guc->suspend_pending)
+ return;
+
+ WRITE_ONCE(q->guc->suspend_pending, false);
+ wake_up(&q->guc->suspend_wait);
+}
+
static void suspend_fence_signal(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
@@ -1321,9 +1337,7 @@ static void suspend_fence_signal(struct xe_exec_queue *q)
guc_read_stopped(guc));
xe_assert(xe, q->guc->suspend_pending);
- q->guc->suspend_pending = false;
- smp_wmb();
- wake_up(&q->guc->suspend_wait);
+ __suspend_fence_signal(q);
}
static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
@@ -1375,6 +1389,8 @@ static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg)
static void guc_exec_queue_process_msg(struct xe_sched_msg *msg)
{
+ struct xe_device *xe = guc_to_xe(exec_queue_to_guc(msg->private_data));
+
trace_xe_sched_msg_recv(msg);
switch (msg->opcode) {
@@ -1393,6 +1409,8 @@ static void guc_exec_queue_process_msg(struct xe_sched_msg *msg)
default:
XE_WARN_ON("Unknown message type");
}
+
+ xe_pm_runtime_put(xe);
}
static const struct drm_sched_backend_ops drm_sched_ops = {
@@ -1476,12 +1494,15 @@ static void guc_exec_queue_kill(struct xe_exec_queue *q)
{
trace_xe_exec_queue_kill(q);
set_exec_queue_killed(q);
+ __suspend_fence_signal(q);
xe_guc_exec_queue_trigger_cleanup(q);
}
static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg *msg,
u32 opcode)
{
+ xe_pm_runtime_get_noresume(guc_to_xe(exec_queue_to_guc(q)));
+
INIT_LIST_HEAD(&msg->link);
msg->opcode = opcode;
msg->private_data = q;
@@ -1572,12 +1593,31 @@ static int guc_exec_queue_suspend(struct xe_exec_queue *q)
return 0;
}
-static void guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
+static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
+ int ret;
+
+ /*
+ * Likely don't need to check exec_queue_killed() as we clear
+ * suspend_pending upon kill, but to be paranoid about races in which
+ * suspend_pending is set after kill, also check kill here.
+ */
+ ret = wait_event_timeout(q->guc->suspend_wait,
+ !READ_ONCE(q->guc->suspend_pending) ||
+ exec_queue_killed(q) ||
+ guc_read_stopped(guc),
+ HZ * 5);
- wait_event(q->guc->suspend_wait, !q->guc->suspend_pending ||
- guc_read_stopped(guc));
+ if (!ret) {
+ xe_gt_warn(guc_to_gt(guc),
+ "Suspend fence, guc_id=%d, failed to respond",
+ q->guc->id);
+ /* XXX: Trigger GT reset? */
+ return -ETIME;
+ }
+
+ return 0;
}
static void guc_exec_queue_resume(struct xe_exec_queue *q)
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c
index 1c9d38b6f5f1..65b2e147c4b9 100644
--- a/drivers/gpu/drm/xe/xe_heci_gsc.c
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.c
@@ -92,7 +92,7 @@ void xe_heci_gsc_fini(struct xe_device *xe)
{
struct xe_heci_gsc *heci_gsc = &xe->heci_gsc;
- if (!HAS_HECI_GSCFI(xe))
+ if (!HAS_HECI_GSCFI(xe) && !HAS_HECI_CSCFI(xe))
return;
if (heci_gsc->adev) {
@@ -177,12 +177,14 @@ void xe_heci_gsc_init(struct xe_device *xe)
const struct heci_gsc_def *def;
int ret;
- if (!HAS_HECI_GSCFI(xe))
+ if (!HAS_HECI_GSCFI(xe) && !HAS_HECI_CSCFI(xe))
return;
heci_gsc->irq = -1;
- if (xe->info.platform == XE_PVC) {
+ if (xe->info.platform == XE_BATTLEMAGE) {
+ def = &heci_gsc_def_dg2;
+ } else if (xe->info.platform == XE_PVC) {
def = &heci_gsc_def_pvc;
} else if (xe->info.platform == XE_DG2) {
def = &heci_gsc_def_dg2;
@@ -232,3 +234,23 @@ void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir)
if (ret)
drm_err_ratelimited(&xe->drm, "error handling GSC irq: %d\n", ret);
}
+
+void xe_heci_csc_irq_handler(struct xe_device *xe, u32 iir)
+{
+ int ret;
+
+ if ((iir & CSC_IRQ_INTF(1)) == 0)
+ return;
+
+ if (!HAS_HECI_CSCFI(xe)) {
+ drm_warn_once(&xe->drm, "CSC irq: not supported");
+ return;
+ }
+
+ if (xe->heci_gsc.irq < 0)
+ return;
+
+ ret = generic_handle_irq(xe->heci_gsc.irq);
+ if (ret)
+ drm_err_ratelimited(&xe->drm, "error handling GSC irq: %d\n", ret);
+}
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.h b/drivers/gpu/drm/xe/xe_heci_gsc.h
index 9db454478fae..48b3b1838045 100644
--- a/drivers/gpu/drm/xe/xe_heci_gsc.h
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.h
@@ -11,10 +11,15 @@ struct xe_device;
struct mei_aux_device;
/*
- * The HECI1 bit corresponds to bit15 and HECI2 to bit14.
+ * GSC HECI1 bit corresponds to bit15 and HECI2 to bit14.
* The reason for this is to allow growth for more interfaces in the future.
*/
-#define GSC_IRQ_INTF(_x) BIT(15 - (_x))
+#define GSC_IRQ_INTF(_x) BIT(15 - (_x))
+
+/*
+ * CSC HECI1 bit corresponds to bit9 and HECI2 to bit10.
+ */
+#define CSC_IRQ_INTF(_x) BIT(9 + (_x))
/**
* struct xe_heci_gsc - graphics security controller for xe, HECI interface
@@ -31,5 +36,6 @@ struct xe_heci_gsc {
void xe_heci_gsc_init(struct xe_device *xe);
void xe_heci_gsc_fini(struct xe_device *xe);
void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir);
+void xe_heci_csc_irq_handler(struct xe_device *xe, u32 iir);
#endif /* __XE_HECI_GSC_DEV_H__ */
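For reference, the new macros expand as follows (interface labels taken from the comments above): GSC_IRQ_INTF(0) == BIT(15) and GSC_IRQ_INTF(1) == BIT(14) for the GSC HECI1/HECI2 interfaces, while CSC_IRQ_INTF(0) == BIT(9) and CSC_IRQ_INTF(1) == BIT(10); the latter is the bit that xe_heci_csc_irq_handler() tests in the iir value.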
diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
index 85733f993d09..5f2c368c35ad 100644
--- a/drivers/gpu/drm/xe/xe_irq.c
+++ b/drivers/gpu/drm/xe/xe_irq.c
@@ -459,6 +459,8 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
* the primary tile.
*/
if (id == 0) {
+ if (HAS_HECI_CSCFI(xe))
+ xe_heci_csc_irq_handler(xe, master_ctl);
xe_display_irq_handler(xe, master_ctl);
gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
}
diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c
index 418661a88918..8999ac511555 100644
--- a/drivers/gpu/drm/xe/xe_lmtt.c
+++ b/drivers/gpu/drm/xe/xe_lmtt.c
@@ -7,7 +7,7 @@
#include <drm/drm_managed.h>
-#include "regs/xe_sriov_regs.h"
+#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
@@ -71,7 +71,7 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
lmtt->ops->lmtt_pte_num(level)),
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
- XE_BO_NEEDS_64K | XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_NEEDS_64K | XE_BO_FLAG_PINNED);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto out_free_pt;
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index c9f5673353ee..6f24aaf58252 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -73,6 +73,7 @@ struct xe_migrate {
#define NUM_PT_SLOTS 32
#define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M
#define MAX_NUM_PTE 512
+#define IDENTITY_OFFSET 256ULL
/*
* Although MI_STORE_DATA_IMM's "length" field is 10-bits, 0x3FE is the largest
@@ -84,15 +85,14 @@ struct xe_migrate {
#define MAX_PTE_PER_SDI 0x1FE
/**
- * xe_tile_migrate_engine() - Get this tile's migrate engine.
+ * xe_tile_migrate_exec_queue() - Get this tile's migrate exec queue.
* @tile: The tile.
*
- * Returns the default migrate engine of this tile.
- * TODO: Perhaps this function is slightly misplaced, and even unneeded?
+ * Returns the default migrate exec queue of this tile.
*
- * Return: The default migrate engine
+ * Return: The default migrate exec queue
*/
-struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile)
+struct xe_exec_queue *xe_tile_migrate_exec_queue(struct xe_tile *tile)
{
return tile->migrate->q;
}
@@ -121,14 +121,64 @@ static u64 xe_migrate_vm_addr(u64 slot, u32 level)
return (slot + 1ULL) << xe_pt_shift(level + 1);
}
-static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr)
+static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr, bool is_comp_pte)
{
/*
* Remove the DPA to get a correct offset into identity table for the
* migrate offset
*/
+ u64 identity_offset = IDENTITY_OFFSET;
+
+ if (GRAPHICS_VER(xe) >= 20 && is_comp_pte)
+ identity_offset += DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G);
+
addr -= xe->mem.vram.dpa_base;
- return addr + (256ULL << xe_pt_shift(2));
+ return addr + (identity_offset << xe_pt_shift(2));
+}
+
+static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo,
+ u64 map_ofs, u64 vram_offset, u16 pat_index, u64 pt_2m_ofs)
+{
+ u64 pos, ofs, flags;
+ u64 entry;
+ /* XXX: Unclear if this should be usable_size? */
+ u64 vram_limit = xe->mem.vram.actual_physical_size +
+ xe->mem.vram.dpa_base;
+ u32 level = 2;
+
+ ofs = map_ofs + XE_PAGE_SIZE * level + vram_offset * 8;
+ flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
+ true, 0);
+
+ xe_assert(xe, IS_ALIGNED(xe->mem.vram.usable_size, SZ_2M));
+
+ /*
+ * Use 1GB pages when possible; the last chunk always uses 2M
+ * pages as mixing reserved memory (stolen, WOCPM) with a single
+ * mapping is not allowed on certain platforms.
+ */
+ for (pos = xe->mem.vram.dpa_base; pos < vram_limit;
+ pos += SZ_1G, ofs += 8) {
+ if (pos + SZ_1G >= vram_limit) {
+ entry = vm->pt_ops->pde_encode_bo(bo, pt_2m_ofs,
+ pat_index);
+ xe_map_wr(xe, &bo->vmap, ofs, u64, entry);
+
+ flags = vm->pt_ops->pte_encode_addr(xe, 0,
+ pat_index,
+ level - 1,
+ true, 0);
+
+ for (ofs = pt_2m_ofs; pos < vram_limit;
+ pos += SZ_2M, ofs += 8)
+ xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
+ break; /* Ensure pos == vram_limit assert correct */
+ }
+
+ xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
+ }
+
+ xe_assert(xe, pos == vram_limit);
}
static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
@@ -137,11 +187,13 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
struct xe_device *xe = tile_to_xe(tile);
u16 pat_index = xe->pat.idx[XE_CACHE_WB];
u8 id = tile->id;
- u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level,
- num_setup = num_level + 1;
+ u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
+#define VRAM_IDENTITY_MAP_COUNT 2
+ u32 num_setup = num_level + VRAM_IDENTITY_MAP_COUNT;
+#undef VRAM_IDENTITY_MAP_COUNT
u32 map_ofs, level, i;
struct xe_bo *bo, *batch = tile->mem.kernel_bb_pool->bo;
- u64 entry, pt30_ofs;
+ u64 entry, pt29_ofs;
/* Can't bump NUM_PT_SLOTS too high */
BUILD_BUG_ON(NUM_PT_SLOTS > SZ_2M/XE_PAGE_SIZE);
@@ -161,9 +213,9 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
if (IS_ERR(bo))
return PTR_ERR(bo);
- /* PT31 reserved for 2M identity map */
- pt30_ofs = bo->size - 2 * XE_PAGE_SIZE;
- entry = vm->pt_ops->pde_encode_bo(bo, pt30_ofs, pat_index);
+ /* PT30 & PT31 reserved for 2M identity map */
+ pt29_ofs = bo->size - 3 * XE_PAGE_SIZE;
+ entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs, pat_index);
xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
map_ofs = (num_entries - num_setup) * XE_PAGE_SIZE;
@@ -215,12 +267,12 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
} else {
u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
- m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
+ m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
if (xe->info.has_usm) {
batch = tile->primary_gt->usm.bb_pool->bo;
batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
- m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
+ m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
}
}
@@ -254,55 +306,36 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
/* Identity map the entire vram at 256GiB offset */
if (IS_DGFX(xe)) {
- u64 pos, ofs, flags;
- /* XXX: Unclear if this should be usable_size? */
- u64 vram_limit = xe->mem.vram.actual_physical_size +
- xe->mem.vram.dpa_base;
+ u64 pt30_ofs = bo->size - 2 * XE_PAGE_SIZE;
- level = 2;
- ofs = map_ofs + XE_PAGE_SIZE * level + 256 * 8;
- flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
- true, 0);
-
- xe_assert(xe, IS_ALIGNED(xe->mem.vram.usable_size, SZ_2M));
+ xe_migrate_program_identity(xe, vm, bo, map_ofs, IDENTITY_OFFSET,
+ pat_index, pt30_ofs);
+ xe_assert(xe, xe->mem.vram.actual_physical_size <=
+ (MAX_NUM_PTE - IDENTITY_OFFSET) * SZ_1G);
/*
- * Use 1GB pages when possible, last chunk always use 2M
- * pages as mixing reserved memory (stolen, WOCPM) with a single
- * mapping is not allowed on certain platforms.
+ * Identity map the entire vram for compressed pat_index for xe2+
+ * if flat ccs is enabled.
*/
- for (pos = xe->mem.vram.dpa_base; pos < vram_limit;
- pos += SZ_1G, ofs += 8) {
- if (pos + SZ_1G >= vram_limit) {
- u64 pt31_ofs = bo->size - XE_PAGE_SIZE;
-
- entry = vm->pt_ops->pde_encode_bo(bo, pt31_ofs,
- pat_index);
- xe_map_wr(xe, &bo->vmap, ofs, u64, entry);
-
- flags = vm->pt_ops->pte_encode_addr(xe, 0,
- pat_index,
- level - 1,
- true, 0);
-
- for (ofs = pt31_ofs; pos < vram_limit;
- pos += SZ_2M, ofs += 8)
- xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
- break; /* Ensure pos == vram_limit assert correct */
- }
-
- xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
+ if (GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe)) {
+ u16 comp_pat_index = xe->pat.idx[XE_CACHE_NONE_COMPRESSION];
+ u64 vram_offset = IDENTITY_OFFSET +
+ DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G);
+ u64 pt31_ofs = bo->size - XE_PAGE_SIZE;
+
+ xe_assert(xe, xe->mem.vram.actual_physical_size <= (MAX_NUM_PTE -
+ IDENTITY_OFFSET - IDENTITY_OFFSET / 2) * SZ_1G);
+ xe_migrate_program_identity(xe, vm, bo, map_ofs, vram_offset,
+ comp_pat_index, pt31_ofs);
}
-
- xe_assert(xe, pos == vram_limit);
}
/*
* Example layout created above, with root level = 3:
* [PT0...PT7]: kernel PT's for copy/clear; 64 or 4KiB PTE's
* [PT8]: Kernel PT for VM_BIND, 4 KiB PTE's
- * [PT9...PT27]: Userspace PT's for VM_BIND, 4 KiB PTE's
- * [PT28 = PDE 0] [PT29 = PDE 1] [PT30 = PDE 2] [PT31 = 2M vram identity map]
+ * [PT9...PT26]: Userspace PT's for VM_BIND, 4 KiB PTE's
+ * [PT27 = PDE 0] [PT28 = PDE 1] [PT29 = PDE 2] [PT30 & PT31 = 2M vram identity map]
*
* This makes the lowest part of the VM point to the pagetables.
* Hence the lowest 2M in the vm should point to itself, with a few writes
@@ -348,6 +381,11 @@ static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
return logical_mask;
}
+static bool xe_migrate_needs_ccs_emit(struct xe_device *xe)
+{
+ return xe_device_has_flat_ccs(xe) && !(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe));
+}
+
/**
* xe_migrate_init() - Initialize a migrate context
* @tile: Back-pointer to the tile we're initializing for.
@@ -421,7 +459,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
return ERR_PTR(err);
if (IS_DGFX(xe)) {
- if (xe_device_has_flat_ccs(xe))
+ if (xe_migrate_needs_ccs_emit(xe))
/* min chunk size corresponds to 4K of CCS Metadata */
m->min_chunk_size = SZ_4K * SZ_64K /
xe_device_ccs_bytes(xe, SZ_64K);
@@ -475,20 +513,26 @@ static bool xe_migrate_allow_identity(u64 size, const struct xe_res_cursor *cur)
return cur->size >= size;
}
+#define PTE_UPDATE_FLAG_IS_VRAM BIT(0)
+#define PTE_UPDATE_FLAG_IS_COMP_PTE BIT(1)
+
static u32 pte_update_size(struct xe_migrate *m,
- bool is_vram,
+ u32 flags,
struct ttm_resource *res,
struct xe_res_cursor *cur,
u64 *L0, u64 *L0_ofs, u32 *L0_pt,
u32 cmd_size, u32 pt_ofs, u32 avail_pts)
{
u32 cmds = 0;
+ bool is_vram = PTE_UPDATE_FLAG_IS_VRAM & flags;
+ bool is_comp_pte = PTE_UPDATE_FLAG_IS_COMP_PTE & flags;
*L0_pt = pt_ofs;
if (is_vram && xe_migrate_allow_identity(*L0, cur)) {
/* Offset into identity map. */
*L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile),
- cur->start + vram_region_gpu_offset(res));
+ cur->start + vram_region_gpu_offset(res),
+ is_comp_pte);
cmds += cmd_size;
} else {
/* Clip L0 to available size */
@@ -661,7 +705,7 @@ static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
struct xe_gt *gt = m->tile->primary_gt;
u32 flush_flags = 0;
- if (xe_device_has_flat_ccs(gt_to_xe(gt)) && !copy_ccs && dst_is_indirect) {
+ if (!copy_ccs && dst_is_indirect) {
/*
* If the src is already in vram, then it should already
* have been cleared by us, or has been populated by the
@@ -737,6 +781,8 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
bool copy_ccs = xe_device_has_flat_ccs(xe) &&
xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
+ bool use_comp_pat = xe_device_has_flat_ccs(xe) &&
+ GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram;
/* Copying CCS between two different BOs is not supported yet. */
if (XE_WARN_ON(copy_ccs && src_bo != dst_bo))
@@ -763,10 +809,11 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
struct xe_sched_job *job;
struct xe_bb *bb;
- u32 flush_flags;
+ u32 flush_flags = 0;
u32 update_idx;
u64 ccs_ofs, ccs_size;
u32 ccs_pt;
+ u32 pte_flags;
bool usm = xe->info.has_usm;
u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
@@ -779,17 +826,20 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
src_L0 = min(src_L0, dst_L0);
- batch_size += pte_update_size(m, src_is_vram, src, &src_it, &src_L0,
+ pte_flags = src_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
+ pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0;
+ batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0,
&src_L0_ofs, &src_L0_pt, 0, 0,
avail_pts);
- batch_size += pte_update_size(m, dst_is_vram, dst, &dst_it, &src_L0,
+ pte_flags = dst_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
+ batch_size += pte_update_size(m, pte_flags, dst, &dst_it, &src_L0,
&dst_L0_ofs, &dst_L0_pt, 0,
avail_pts, avail_pts);
if (copy_system_ccs) {
ccs_size = xe_device_ccs_bytes(xe, src_L0);
- batch_size += pte_update_size(m, false, NULL, &ccs_it, &ccs_size,
+ batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size,
&ccs_ofs, &ccs_pt, 0,
2 * avail_pts,
avail_pts);
@@ -798,7 +848,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
/* Add copy commands size here */
batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
- ((xe_device_has_flat_ccs(xe) ? EMIT_COPY_CCS_DW : 0));
+ ((xe_migrate_needs_ccs_emit(xe) ? EMIT_COPY_CCS_DW : 0));
bb = xe_bb_new(gt, batch_size, usm);
if (IS_ERR(bb)) {
@@ -827,11 +877,12 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
if (!copy_only_ccs)
emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
- flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
- IS_DGFX(xe) ? src_is_vram : src_is_pltt,
- dst_L0_ofs,
- IS_DGFX(xe) ? dst_is_vram : dst_is_pltt,
- src_L0, ccs_ofs, copy_ccs);
+ if (xe_migrate_needs_ccs_emit(xe))
+ flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
+ IS_DGFX(xe) ? src_is_vram : src_is_pltt,
+ dst_L0_ofs,
+ IS_DGFX(xe) ? dst_is_vram : dst_is_pltt,
+ src_L0, ccs_ofs, copy_ccs);
job = xe_bb_create_migration_job(m->q, bb,
xe_migrate_batch_base(m, usm),
@@ -1022,6 +1073,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
struct xe_sched_job *job;
struct xe_bb *bb;
u32 batch_size, update_idx;
+ u32 pte_flags;
bool usm = xe->info.has_usm;
u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
@@ -1029,13 +1081,14 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
clear_L0 = xe_migrate_res_sizes(m, &src_it);
/* Calculate final sizes and batch size.. */
+ pte_flags = clear_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
batch_size = 2 +
- pte_update_size(m, clear_vram, src, &src_it,
+ pte_update_size(m, pte_flags, src, &src_it,
&clear_L0, &clear_L0_ofs, &clear_L0_pt,
clear_system_ccs ? 0 : emit_clear_cmd_len(gt), 0,
avail_pts);
- if (xe_device_has_flat_ccs(xe))
+ if (xe_migrate_needs_ccs_emit(xe))
batch_size += EMIT_COPY_CCS_DW;
/* Clear commands */
@@ -1063,7 +1116,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
if (!clear_system_ccs)
emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram);
- if (xe_device_has_flat_ccs(xe)) {
+ if (xe_migrate_needs_ccs_emit(xe)) {
emit_copy_ccs(gt, bb, clear_L0_ofs, true,
m->cleared_mem_ofs, false, clear_L0);
flush_flags = MI_FLUSH_DW_CCS;
@@ -1126,6 +1179,7 @@ err_sync:
}
static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
+ const struct xe_vm_pgtable_update_op *pt_op,
const struct xe_vm_pgtable_update *update,
struct xe_migrate_pt_update *pt_update)
{
@@ -1146,7 +1200,7 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
if (!ppgtt_ofs)
ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile),
xe_bo_addr(update->pt_bo, 0,
- XE_PAGE_SIZE));
+ XE_PAGE_SIZE), false);
do {
u64 addr = ppgtt_ofs + ofs * 8;
@@ -1160,8 +1214,12 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
bb->cs[bb->len++] = lower_32_bits(addr);
bb->cs[bb->len++] = upper_32_bits(addr);
- ops->populate(pt_update, tile, NULL, bb->cs + bb->len, ofs, chunk,
- update);
+ if (pt_op->bind)
+ ops->populate(pt_update, tile, NULL, bb->cs + bb->len,
+ ofs, chunk, update);
+ else
+ ops->clear(pt_update, tile, NULL, bb->cs + bb->len,
+ ofs, chunk, update);
bb->len += chunk * 2;
ofs += chunk;
@@ -1186,114 +1244,58 @@ struct migrate_test_params {
static struct dma_fence *
xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
- struct xe_vm *vm, struct xe_bo *bo,
- const struct xe_vm_pgtable_update *updates,
- u32 num_updates, bool wait_vm,
struct xe_migrate_pt_update *pt_update)
{
XE_TEST_DECLARE(struct migrate_test_params *test =
to_migrate_test_params
(xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));)
const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
- struct dma_fence *fence;
+ struct xe_vm *vm = pt_update->vops->vm;
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &pt_update->vops->pt_update_ops[pt_update->tile_id];
int err;
- u32 i;
+ u32 i, j;
if (XE_TEST_ONLY(test && test->force_gpu))
return ERR_PTR(-ETIME);
- if (bo && !dma_resv_test_signaled(bo->ttm.base.resv,
- DMA_RESV_USAGE_KERNEL))
- return ERR_PTR(-ETIME);
-
- if (wait_vm && !dma_resv_test_signaled(xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP))
- return ERR_PTR(-ETIME);
-
if (ops->pre_commit) {
pt_update->job = NULL;
err = ops->pre_commit(pt_update);
if (err)
return ERR_PTR(err);
}
- for (i = 0; i < num_updates; i++) {
- const struct xe_vm_pgtable_update *update = &updates[i];
-
- ops->populate(pt_update, m->tile, &update->pt_bo->vmap, NULL,
- update->ofs, update->qwords, update);
- }
-
- if (vm) {
- trace_xe_vm_cpu_bind(vm);
- xe_device_wmb(vm->xe);
- }
-
- fence = dma_fence_get_stub();
-
- return fence;
-}
-
-static bool no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q,
- struct xe_sync_entry *syncs, u32 num_syncs)
-{
- struct dma_fence *fence;
- int i;
- for (i = 0; i < num_syncs; i++) {
- fence = syncs[i].fence;
-
- if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &fence->flags))
- return false;
- }
- if (q) {
- fence = xe_exec_queue_last_fence_get(q, vm);
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
- dma_fence_put(fence);
- return false;
+ for (i = 0; i < pt_update_ops->num_ops; ++i) {
+ const struct xe_vm_pgtable_update_op *pt_op =
+ &pt_update_ops->ops[i];
+
+ for (j = 0; j < pt_op->num_entries; j++) {
+ const struct xe_vm_pgtable_update *update =
+ &pt_op->entries[j];
+
+ if (pt_op->bind)
+ ops->populate(pt_update, m->tile,
+ &update->pt_bo->vmap, NULL,
+ update->ofs, update->qwords,
+ update);
+ else
+ ops->clear(pt_update, m->tile,
+ &update->pt_bo->vmap, NULL,
+ update->ofs, update->qwords, update);
}
- dma_fence_put(fence);
}
- return true;
+ trace_xe_vm_cpu_bind(vm);
+ xe_device_wmb(vm->xe);
+
+ return dma_fence_get_stub();
}
-/**
- * xe_migrate_update_pgtables() - Pipelined page-table update
- * @m: The migrate context.
- * @vm: The vm we'll be updating.
- * @bo: The bo whose dma-resv we will await before updating, or NULL if userptr.
- * @q: The exec queue to be used for the update or NULL if the default
- * migration engine is to be used.
- * @updates: An array of update descriptors.
- * @num_updates: Number of descriptors in @updates.
- * @syncs: Array of xe_sync_entry to await before updating. Note that waits
- * will block the engine timeline.
- * @num_syncs: Number of entries in @syncs.
- * @pt_update: Pointer to a struct xe_migrate_pt_update, which contains
- * pointers to callback functions and, if subclassed, private arguments to
- * those.
- *
- * Perform a pipelined page-table update. The update descriptors are typically
- * built under the same lock critical section as a call to this function. If
- * using the default engine for the updates, they will be performed in the
- * order they grab the job_mutex. If different engines are used, external
- * synchronization is needed for overlapping updates to maintain page-table
- * consistency. Note that the meaing of "overlapping" is that the updates
- * touch the same page-table, which might be a higher-level page-directory.
- * If no pipelining is needed, then updates may be performed by the cpu.
- *
- * Return: A dma_fence that, when signaled, indicates the update completion.
- */
-struct dma_fence *
-xe_migrate_update_pgtables(struct xe_migrate *m,
- struct xe_vm *vm,
- struct xe_bo *bo,
- struct xe_exec_queue *q,
- const struct xe_vm_pgtable_update *updates,
- u32 num_updates,
- struct xe_sync_entry *syncs, u32 num_syncs,
- struct xe_migrate_pt_update *pt_update)
+static struct dma_fence *
+__xe_migrate_update_pgtables(struct xe_migrate *m,
+ struct xe_migrate_pt_update *pt_update,
+ struct xe_vm_pgtable_update_ops *pt_update_ops)
{
const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
struct xe_tile *tile = m->tile;
@@ -1302,59 +1304,53 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
struct xe_sched_job *job;
struct dma_fence *fence;
struct drm_suballoc *sa_bo = NULL;
- struct xe_vma *vma = pt_update->vma;
struct xe_bb *bb;
- u32 i, batch_size, ppgtt_ofs, update_idx, page_ofs = 0;
+ u32 i, j, batch_size = 0, ppgtt_ofs, update_idx, page_ofs = 0;
+ u32 num_updates = 0, current_update = 0;
u64 addr;
int err = 0;
- bool usm = !q && xe->info.has_usm;
- bool first_munmap_rebind = vma &&
- vma->gpuva.flags & XE_VMA_FIRST_REBIND;
- struct xe_exec_queue *q_override = !q ? m->q : q;
- u16 pat_index = xe->pat.idx[XE_CACHE_WB];
+ bool is_migrate = pt_update_ops->q == m->q;
+ bool usm = is_migrate && xe->info.has_usm;
+
+ for (i = 0; i < pt_update_ops->num_ops; ++i) {
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
+ struct xe_vm_pgtable_update *updates = pt_op->entries;
- /* Use the CPU if no in syncs and engine is idle */
- if (no_in_syncs(vm, q, syncs, num_syncs) && xe_exec_queue_is_idle(q_override)) {
- fence = xe_migrate_update_pgtables_cpu(m, vm, bo, updates,
- num_updates,
- first_munmap_rebind,
- pt_update);
- if (!IS_ERR(fence) || fence == ERR_PTR(-EAGAIN))
- return fence;
+ num_updates += pt_op->num_entries;
+ for (j = 0; j < pt_op->num_entries; ++j) {
+ u32 num_cmds = DIV_ROUND_UP(updates[j].qwords,
+ MAX_PTE_PER_SDI);
+
+ /* align noop + MI_STORE_DATA_IMM cmd prefix */
+ batch_size += 4 * num_cmds + updates[j].qwords * 2;
+ }
}
/* fixed + PTE entries */
if (IS_DGFX(xe))
- batch_size = 2;
+ batch_size += 2;
else
- batch_size = 6 + num_updates * 2;
-
- for (i = 0; i < num_updates; i++) {
- u32 num_cmds = DIV_ROUND_UP(updates[i].qwords, MAX_PTE_PER_SDI);
-
- /* align noop + MI_STORE_DATA_IMM cmd prefix */
- batch_size += 4 * num_cmds + updates[i].qwords * 2;
- }
-
- /*
- * XXX: Create temp bo to copy from, if batch_size becomes too big?
- *
- * Worst case: Sum(2 * (each lower level page size) + (top level page size))
- * Should be reasonably bound..
- */
- xe_tile_assert(tile, batch_size < SZ_128K);
+ batch_size += 6 * (num_updates / MAX_PTE_PER_SDI + 1) +
+ num_updates * 2;
- bb = xe_bb_new(gt, batch_size, !q && xe->info.has_usm);
+ bb = xe_bb_new(gt, batch_size, usm);
if (IS_ERR(bb))
return ERR_CAST(bb);
/* For sysmem PTE's, need to map them in our hole.. */
if (!IS_DGFX(xe)) {
+ u32 ptes, ofs;
+
ppgtt_ofs = NUM_KERNEL_PDE - 1;
- if (q) {
- xe_tile_assert(tile, num_updates <= NUM_VMUSA_WRITES_PER_UNIT);
+ if (!is_migrate) {
+ u32 num_units = DIV_ROUND_UP(num_updates,
+ NUM_VMUSA_WRITES_PER_UNIT);
- sa_bo = drm_suballoc_new(&m->vm_update_sa, 1,
+ if (num_units > m->vm_update_sa.size) {
+ err = -ENOBUFS;
+ goto err_bb;
+ }
+ sa_bo = drm_suballoc_new(&m->vm_update_sa, num_units,
GFP_KERNEL, true, 0);
if (IS_ERR(sa_bo)) {
err = PTR_ERR(sa_bo);
@@ -1370,18 +1366,49 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
}
/* Map our PT's to gtt */
- bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(num_updates);
- bb->cs[bb->len++] = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
- bb->cs[bb->len++] = 0; /* upper_32_bits */
-
- for (i = 0; i < num_updates; i++) {
- struct xe_bo *pt_bo = updates[i].pt_bo;
+ i = 0;
+ j = 0;
+ ptes = num_updates;
+ ofs = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
+ while (ptes) {
+ u32 chunk = min(MAX_PTE_PER_SDI, ptes);
+ u32 idx = 0;
+
+ bb->cs[bb->len++] = MI_STORE_DATA_IMM |
+ MI_SDI_NUM_QW(chunk);
+ bb->cs[bb->len++] = ofs;
+ bb->cs[bb->len++] = 0; /* upper_32_bits */
+
+ for (; i < pt_update_ops->num_ops; ++i) {
+ struct xe_vm_pgtable_update_op *pt_op =
+ &pt_update_ops->ops[i];
+ struct xe_vm_pgtable_update *updates = pt_op->entries;
+
+ for (; j < pt_op->num_entries; ++j, ++current_update, ++idx) {
+ struct xe_vm *vm = pt_update->vops->vm;
+ struct xe_bo *pt_bo = updates[j].pt_bo;
+
+ if (idx == chunk)
+ goto next_cmd;
+
+ xe_tile_assert(tile, pt_bo->size == SZ_4K);
+
+ /* Map a PT at most once */
+ if (pt_bo->update_index < 0)
+ pt_bo->update_index = current_update;
+
+ addr = vm->pt_ops->pte_encode_bo(pt_bo, 0,
+ XE_CACHE_WB, 0);
+ bb->cs[bb->len++] = lower_32_bits(addr);
+ bb->cs[bb->len++] = upper_32_bits(addr);
+ }
- xe_tile_assert(tile, pt_bo->size == SZ_4K);
+ j = 0;
+ }
- addr = vm->pt_ops->pte_encode_bo(pt_bo, 0, pat_index, 0);
- bb->cs[bb->len++] = lower_32_bits(addr);
- bb->cs[bb->len++] = upper_32_bits(addr);
+next_cmd:
+ ptes -= chunk;
+ ofs += chunk * sizeof(u64);
}
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
@@ -1389,19 +1416,36 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
addr = xe_migrate_vm_addr(ppgtt_ofs, 0) +
(page_ofs / sizeof(u64)) * XE_PAGE_SIZE;
- for (i = 0; i < num_updates; i++)
- write_pgtable(tile, bb, addr + i * XE_PAGE_SIZE,
- &updates[i], pt_update);
+ for (i = 0; i < pt_update_ops->num_ops; ++i) {
+ struct xe_vm_pgtable_update_op *pt_op =
+ &pt_update_ops->ops[i];
+ struct xe_vm_pgtable_update *updates = pt_op->entries;
+
+ for (j = 0; j < pt_op->num_entries; ++j) {
+ struct xe_bo *pt_bo = updates[j].pt_bo;
+
+ write_pgtable(tile, bb, addr +
+ pt_bo->update_index * XE_PAGE_SIZE,
+ pt_op, &updates[j], pt_update);
+ }
+ }
} else {
/* phys pages, no preamble required */
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
update_idx = bb->len;
- for (i = 0; i < num_updates; i++)
- write_pgtable(tile, bb, 0, &updates[i], pt_update);
+ for (i = 0; i < pt_update_ops->num_ops; ++i) {
+ struct xe_vm_pgtable_update_op *pt_op =
+ &pt_update_ops->ops[i];
+ struct xe_vm_pgtable_update *updates = pt_op->entries;
+
+ for (j = 0; j < pt_op->num_entries; ++j)
+ write_pgtable(tile, bb, 0, pt_op, &updates[j],
+ pt_update);
+ }
}
- job = xe_bb_create_migration_job(q ?: m->q, bb,
+ job = xe_bb_create_migration_job(pt_update_ops->q, bb,
xe_migrate_batch_base(m, usm),
update_idx);
if (IS_ERR(job)) {
@@ -1409,46 +1453,20 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
goto err_sa;
}
- /* Wait on BO move */
- if (bo) {
- err = xe_sched_job_add_deps(job, bo->ttm.base.resv,
- DMA_RESV_USAGE_KERNEL);
- if (err)
- goto err_job;
- }
-
- /*
- * Munmap style VM unbind, need to wait for all jobs to be complete /
- * trigger preempts before moving forward
- */
- if (first_munmap_rebind) {
- err = xe_sched_job_add_deps(job, xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP);
- if (err)
- goto err_job;
- }
-
- err = xe_sched_job_last_fence_add_dep(job, vm);
- for (i = 0; !err && i < num_syncs; i++)
- err = xe_sync_entry_add_deps(&syncs[i], job);
-
- if (err)
- goto err_job;
-
if (ops->pre_commit) {
pt_update->job = job;
err = ops->pre_commit(pt_update);
if (err)
goto err_job;
}
- if (!q)
+ if (is_migrate)
mutex_lock(&m->job_mutex);
xe_sched_job_arm(job);
fence = dma_fence_get(&job->drm.s_fence->finished);
xe_sched_job_push(job);
- if (!q)
+ if (is_migrate)
mutex_unlock(&m->job_mutex);
xe_bb_free(bb, fence);
@@ -1466,6 +1484,40 @@ err_bb:
}
/**
+ * xe_migrate_update_pgtables() - Pipelined page-table update
+ * @m: The migrate context.
+ * @pt_update: PT update arguments
+ *
+ * Perform a pipelined page-table update. The update descriptors are typically
+ * built under the same lock critical section as a call to this function. If
+ * using the default engine for the updates, they will be performed in the
+ * order they grab the job_mutex. If different engines are used, external
+ * synchronization is needed for overlapping updates to maintain page-table
+ * consistency. Note that the meaning of "overlapping" is that the updates
+ * touch the same page-table, which might be a higher-level page-directory.
+ * If no pipelining is needed, then updates may be performed by the cpu.
+ *
+ * Return: A dma_fence that, when signaled, indicates the update completion.
+ */
+struct dma_fence *
+xe_migrate_update_pgtables(struct xe_migrate *m,
+ struct xe_migrate_pt_update *pt_update)
+
+{
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &pt_update->vops->pt_update_ops[pt_update->tile_id];
+ struct dma_fence *fence;
+
+ fence = xe_migrate_update_pgtables_cpu(m, pt_update);
+
+ /* -ETIME indicates a job is needed, anything else is legit error */
+ if (!IS_ERR(fence) || PTR_ERR(fence) != -ETIME)
+ return fence;
+
+ return __xe_migrate_update_pgtables(m, pt_update, pt_update_ops);
+}
+
+/**
* xe_migrate_wait() - Complete all operations using the xe_migrate context
* @m: Migrate context to wait for.
*
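To make the identity-map arithmetic above concrete: xe_migrate_vram_ofs() subtracts the device physical address base and adds IDENTITY_OFFSET (256) shifted by the level-2 page shift, which places the plain identity map at the 256 GiB mark of the migrate VM. Assuming, for illustration, a 16 GiB VRAM device and xe_pt_shift(2) == 30 (1 GiB per level-2 entry), a compressed-PAT access to the same page lands 16 GiB higher, at the 272 GiB mark, since the compressed identity map is stacked DIV_ROUND_UP_ULL(actual_physical_size, SZ_1G) entries after the plain one.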
diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
index 951f19318ea4..453e0ecf5034 100644
--- a/drivers/gpu/drm/xe/xe_migrate.h
+++ b/drivers/gpu/drm/xe/xe_migrate.h
@@ -47,6 +47,24 @@ struct xe_migrate_pt_update_ops {
struct xe_tile *tile, struct iosys_map *map,
void *pos, u32 ofs, u32 num_qwords,
const struct xe_vm_pgtable_update *update);
+ /**
+ * @clear: Clear a command buffer or page-table with ptes.
+ * @pt_update: Embeddable callback argument.
+ * @tile: The tile for the current operation.
+ * @map: struct iosys_map into the memory to be populated.
+ * @pos: If @map is NULL, map into the memory to be populated.
+ * @ofs: qword offset into @map, unused if @map is NULL.
+ * @num_qwords: Number of qwords to write.
+ * @update: Information about the PTEs to be inserted.
+ *
+ * This interface is intended to be used as a callback into the
+ * page-table system to populate command buffers or shared
+ * page-tables with PTEs.
+ */
+ void (*clear)(struct xe_migrate_pt_update *pt_update,
+ struct xe_tile *tile, struct iosys_map *map,
+ void *pos, u32 ofs, u32 num_qwords,
+ const struct xe_vm_pgtable_update *update);
/**
* @pre_commit: Callback to be called just before arming the
@@ -67,14 +85,10 @@ struct xe_migrate_pt_update_ops {
struct xe_migrate_pt_update {
/** @ops: Pointer to the struct xe_migrate_pt_update_ops callbacks */
const struct xe_migrate_pt_update_ops *ops;
- /** @vma: The vma we're updating the pagetable for. */
- struct xe_vma *vma;
+ /** @vops: VMA operations */
+ struct xe_vma_ops *vops;
/** @job: The job if a GPU page-table update. NULL otherwise */
struct xe_sched_job *job;
- /** @start: Start of update for the range fence */
- u64 start;
- /** @last: Last of update for the range fence */
- u64 last;
/** @tile_id: Tile ID of the update */
u8 tile_id;
};
@@ -96,15 +110,9 @@ struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m);
struct dma_fence *
xe_migrate_update_pgtables(struct xe_migrate *m,
- struct xe_vm *vm,
- struct xe_bo *bo,
- struct xe_exec_queue *q,
- const struct xe_vm_pgtable_update *updates,
- u32 num_updates,
- struct xe_sync_entry *syncs, u32 num_syncs,
struct xe_migrate_pt_update *pt_update);
void xe_migrate_wait(struct xe_migrate *m);
-struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile);
+struct xe_exec_queue *xe_tile_migrate_exec_queue(struct xe_tile *tile);
#endif
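The new @clear hook mirrors @populate but writes "empty" PTEs. A hypothetical sketch of such a callback, assuming an all-zero empty PTE encoding (the real implementation lives in the page-table code and derives the value from the VM):

static void example_pt_clear(struct xe_migrate_pt_update *pt_update,
			     struct xe_tile *tile, struct iosys_map *map,
			     void *pos, u32 ofs, u32 num_qwords,
			     const struct xe_vm_pgtable_update *update)
{
	u64 empty = 0;	/* assumed empty/scratch PTE encoding */
	u32 i;

	if (map)
		/* @ofs is a qword offset, xe_map_wr() takes a byte offset */
		for (i = 0; i < num_qwords; i++)
			xe_map_wr(tile_to_xe(tile), map,
				  (ofs + i) * sizeof(u64), u64, empty);
	else
		memset64(pos, empty, num_qwords);
}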
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index f92faad4b96d..bdcc7282385c 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -33,29 +33,56 @@ static void tiles_fini(void *arg)
tile->mmio.regs = NULL;
}
-int xe_mmio_probe_tiles(struct xe_device *xe)
+/*
+ * On multi-tile devices, partition the BAR space for MMIO on each tile,
+ * possibly accounting for register override on the number of tiles available.
+ * Resulting memory layout is like below:
+ *
+ * .----------------------. <- tile_count * tile_mmio_size
+ * | .... |
+ * |----------------------| <- 2 * tile_mmio_size
+ * | tile1->mmio.regs |
+ * |----------------------| <- 1 * tile_mmio_size
+ * | tile0->mmio.regs |
+ * '----------------------' <- 0MB
+ */
+static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
{
- size_t tile_mmio_size = SZ_16M, tile_mmio_ext_size = xe->info.tile_mmio_ext_size;
- u8 id, tile_count = xe->info.tile_count;
- struct xe_gt *gt = xe_root_mmio_gt(xe);
struct xe_tile *tile;
void __iomem *regs;
- u32 mtcfg;
+ u8 id;
- if (tile_count == 1)
- goto add_mmio_ext;
+ /*
+ * Nothing to be done as tile 0 has already been setup earlier with the
+ * entire BAR mapped - see xe_mmio_init()
+ */
+ if (xe->info.tile_count == 1)
+ return;
+ /* Possibly override number of tiles based on configuration register */
if (!xe->info.skip_mtcfg) {
+ struct xe_gt *gt = xe_root_mmio_gt(xe);
+ u8 tile_count;
+ u32 mtcfg;
+
+ /*
+ * Although the per-tile mmio regs are not yet initialized, this
+ * is fine as it's going to the root gt, that's guaranteed to be
+ * initialized earlier in xe_mmio_init()
+ */
mtcfg = xe_mmio_read64_2x32(gt, XEHP_MTCFG_ADDR);
tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;
+
if (tile_count < xe->info.tile_count) {
drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n",
xe->info.tile_count, tile_count);
xe->info.tile_count = tile_count;
/*
- * FIXME: Needs some work for standalone media, but should be impossible
- * with multi-tile for now.
+ * FIXME: Needs some work for standalone media, but
+ * should be impossible with multi-tile for now:
+ * multi-tile platform with standalone media doesn't
+ * exist
*/
xe->info.gt_count = xe->info.tile_count;
}
@@ -67,23 +94,51 @@ int xe_mmio_probe_tiles(struct xe_device *xe)
tile->mmio.regs = regs;
regs += tile_mmio_size;
}
+}
-add_mmio_ext:
- /*
- * By design, there's a contiguous multi-tile MMIO space (16MB hard coded per tile).
- * When supported, there could be an additional contiguous multi-tile MMIO extension
- * space ON TOP of it, and hence the necessity for distinguished MMIO spaces.
- */
- if (xe->info.has_mmio_ext) {
- regs = xe->mmio.regs + tile_mmio_size * tile_count;
+/*
+ * On top of all the multi-tile MMIO space there can be a platform-dependent
+ * extension for each tile, resulting in a layout like below:
+ *
+ * .----------------------. <- ext_base + tile_count * tile_mmio_ext_size
+ * | .... |
+ * |----------------------| <- ext_base + 2 * tile_mmio_ext_size
+ * | tile1->mmio_ext.regs |
+ * |----------------------| <- ext_base + 1 * tile_mmio_ext_size
+ * | tile0->mmio_ext.regs |
+ * |======================| <- ext_base = tile_count * tile_mmio_size
+ * | |
+ * | mmio.regs |
+ * | |
+ * '----------------------' <- 0MB
+ *
+ * Set up the tile[]->mmio_ext pointers/sizes.
+ */
+static void mmio_extension_setup(struct xe_device *xe, size_t tile_mmio_size,
+ size_t tile_mmio_ext_size)
+{
+ struct xe_tile *tile;
+ void __iomem *regs;
+ u8 id;
- for_each_tile(tile, xe, id) {
- tile->mmio_ext.size = tile_mmio_ext_size;
- tile->mmio_ext.regs = regs;
+ if (!xe->info.has_mmio_ext)
+ return;
- regs += tile_mmio_ext_size;
- }
+ regs = xe->mmio.regs + tile_mmio_size * xe->info.tile_count;
+ for_each_tile(tile, xe, id) {
+ tile->mmio_ext.size = tile_mmio_ext_size;
+ tile->mmio_ext.regs = regs;
+ regs += tile_mmio_ext_size;
}
+}
+
+int xe_mmio_probe_tiles(struct xe_device *xe)
+{
+ size_t tile_mmio_size = SZ_16M;
+ size_t tile_mmio_ext_size = xe->info.tile_mmio_ext_size;
+
+ mmio_multi_tile_setup(xe, tile_mmio_size);
+ mmio_extension_setup(xe, tile_mmio_size, tile_mmio_ext_size);
return devm_add_action_or_reset(xe->drm.dev, tiles_fini, xe);
}
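[Editor's note] A minimal sketch of the address arithmetic implied by the two layout diagrams above, assuming only fields already used in this file (xe->mmio.regs, xe->info.tile_count); the helper names are hypothetical.

static void __iomem *example_tile_regs(struct xe_device *xe, u8 tile_id,
				       size_t tile_mmio_size)
{
	/* tileN's register window starts N * tile_mmio_size into the BAR */
	return xe->mmio.regs + (size_t)tile_id * tile_mmio_size;
}

static void __iomem *example_tile_ext_regs(struct xe_device *xe, u8 tile_id,
					   size_t tile_mmio_size,
					   size_t tile_mmio_ext_size)
{
	/* the extension area begins right above all regular per-tile windows */
	void __iomem *ext_base = xe->mmio.regs +
		(size_t)xe->info.tile_count * tile_mmio_size;

	return ext_base + (size_t)tile_id * tile_mmio_ext_size;
}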
@@ -121,12 +176,29 @@ int xe_mmio_init(struct xe_device *xe)
return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe);
}
+static void mmio_flush_pending_writes(struct xe_gt *gt)
+{
+#define DUMMY_REG_OFFSET 0x130030
+ struct xe_tile *tile = gt_to_tile(gt);
+ int i;
+
+ if (tile->xe->info.platform != XE_LUNARLAKE)
+ return;
+
+ /* 4 dummy writes to flush any pending (posted) MMIO writes */
+ for (i = 0; i < 4; i++)
+ writel(0, tile->mmio.regs + DUMMY_REG_OFFSET);
+}
+
u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
{
struct xe_tile *tile = gt_to_tile(gt);
u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
u8 val;
+ /* Wa_15015404425 */
+ mmio_flush_pending_writes(gt);
+
val = readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
@@ -139,6 +211,9 @@ u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg)
u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
u16 val;
+ /* Wa_15015404425 */
+ mmio_flush_pending_writes(gt);
+
val = readw((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
@@ -151,7 +226,11 @@ void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
trace_xe_reg_rw(gt, true, addr, val, sizeof(val));
- writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
+
+ if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt)))
+ xe_gt_sriov_vf_write32(gt, reg, val);
+ else
+ writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
}
u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
@@ -160,6 +239,9 @@ u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
u32 val;
+ /* Wa_15015404425 */
+ mmio_flush_pending_writes(gt);
+
if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt)))
val = xe_gt_sriov_vf_read32(gt, reg);
else
@@ -251,37 +333,24 @@ u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg)
return (u64)udw << 32 | ldw;
}
-/**
- * xe_mmio_wait32() - Wait for a register to match the desired masked value
- * @gt: MMIO target GT
- * @reg: register to read value from
- * @mask: mask to be applied to the value read from the register
- * @val: desired value after applying the mask
- * @timeout_us: time out after this period of time. Wait logic tries to be
- * smart, applying an exponential backoff until @timeout_us is reached.
- * @out_val: if not NULL, points where to store the last unmasked value
- * @atomic: needs to be true if calling from an atomic context
- *
- * This function polls for the desired masked value and returns zero on success
- * or -ETIMEDOUT if timed out.
- *
- * Note that @timeout_us represents the minimum amount of time to wait before
- * giving up. The actual time taken by this function can be a little more than
- * @timeout_us for different reasons, specially in non-atomic contexts. Thus,
- * it is possible that this function succeeds even after @timeout_us has passed.
- */
-int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
- u32 *out_val, bool atomic)
+static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
+ u32 *out_val, bool atomic, bool expect_match)
{
ktime_t cur = ktime_get_raw();
const ktime_t end = ktime_add_us(cur, timeout_us);
int ret = -ETIMEDOUT;
s64 wait = 10;
u32 read;
+ bool check;
for (;;) {
read = xe_mmio_read32(gt, reg);
- if ((read & mask) == val) {
+
+ check = (read & mask) == val;
+ if (!expect_match)
+ check = !check;
+
+ if (check) {
ret = 0;
break;
}
@@ -302,7 +371,12 @@ int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 t
if (ret != 0) {
read = xe_mmio_read32(gt, reg);
- if ((read & mask) == val)
+
+ check = (read & mask) == val;
+ if (!expect_match)
+ check = !check;
+
+ if (check)
ret = 0;
}
@@ -313,62 +387,45 @@ int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 t
}
/**
- * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
+ * xe_mmio_wait32() - Wait for a register to match the desired masked value
* @gt: MMIO target GT
* @reg: register to read value from
* @mask: mask to be applied to the value read from the register
- * @val: value to match after applying the mask
+ * @val: desired value after applying the mask
* @timeout_us: time out after this period of time. Wait logic tries to be
* smart, applying an exponential backoff until @timeout_us is reached.
* @out_val: if not NULL, points where to store the last unmasked value
* @atomic: needs to be true if calling from an atomic context
*
- * This function polls for a masked value to change from a given value and
- * returns zero on success or -ETIMEDOUT if timed out.
+ * This function polls for the desired masked value and returns zero on success
+ * or -ETIMEDOUT if timed out.
*
* Note that @timeout_us represents the minimum amount of time to wait before
* giving up. The actual time taken by this function can be a little more than
 * @timeout_us for different reasons, especially in non-atomic contexts. Thus,
* it is possible that this function succeeds even after @timeout_us has passed.
*/
+int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
+ u32 *out_val, bool atomic)
+{
+ return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, true);
+}
+
+/**
+ * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
+ * @gt: MMIO target GT
+ * @reg: register to read value from
+ * @mask: mask to be applied to the value read from the register
+ * @val: value not to be matched after applying the mask
+ * @timeout_us: time out after this period of time
+ * @out_val: if not NULL, points where to store the last unmasked value
+ * @atomic: needs to be true if calling from an atomic context
+ *
+ * This function works exactly like xe_mmio_wait32() with the exception that
+ * @val is expected not to be matched.
+ */
int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
u32 *out_val, bool atomic)
{
- ktime_t cur = ktime_get_raw();
- const ktime_t end = ktime_add_us(cur, timeout_us);
- int ret = -ETIMEDOUT;
- s64 wait = 10;
- u32 read;
-
- for (;;) {
- read = xe_mmio_read32(gt, reg);
- if ((read & mask) != val) {
- ret = 0;
- break;
- }
-
- cur = ktime_get_raw();
- if (!ktime_before(cur, end))
- break;
-
- if (ktime_after(ktime_add_us(cur, wait), end))
- wait = ktime_us_delta(end, cur);
-
- if (atomic)
- udelay(wait);
- else
- usleep_range(wait, wait << 1);
- wait <<= 1;
- }
-
- if (ret != 0) {
- read = xe_mmio_read32(gt, reg);
- if ((read & mask) != val)
- ret = 0;
- }
-
- if (out_val)
- *out_val = read;
-
- return ret;
+ return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, false);
}
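[Editor's note] A hedged usage sketch for the polling helpers above; the register, bit positions and timeout are made up for illustration, everything else (xe_mmio_wait32(), xe_mmio_wait32_not(), drm_dbg()) is taken from the surrounding code.

static int example_wait_ready(struct xe_gt *gt, struct xe_reg status_reg)
{
	u32 last;
	int ret;

	/* Poll (with exponential backoff) for bit 0 to be set, up to 100 us */
	ret = xe_mmio_wait32(gt, status_reg, BIT(0), BIT(0), 100, &last, false);
	if (ret == -ETIMEDOUT)
		drm_dbg(&gt_to_xe(gt)->drm,
			"not ready, last value 0x%08x\n", last);

	/* The inverse helper waits for the masked value to stop matching */
	return ret ?: xe_mmio_wait32_not(gt, status_reg, BIT(1), BIT(1),
					 100, NULL, false);
}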
diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
index 6ae0cc32c651..26551410ecc8 100644
--- a/drivers/gpu/drm/xe/xe_mmio.h
+++ b/drivers/gpu/drm/xe/xe_mmio.h
@@ -22,7 +22,6 @@ u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set);
int xe_mmio_write32_and_verify(struct xe_gt *gt, struct xe_reg reg, u32 val, u32 mask, u32 eval);
bool xe_mmio_in_range(const struct xe_gt *gt, const struct xe_mmio_range *range, struct xe_reg reg);
-int xe_mmio_probe_vram(struct xe_device *xe);
u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg);
int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
u32 *out_val, bool atomic);
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 6d69f751bf78..3ef92eb8fbb1 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -641,7 +641,7 @@ static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc,
u32 offset = xe_bo_ggtt_addr(lrc->bo);
do {
- bb->cs[bb->len++] = MI_STORE_DATA_IMM | BIT(22) /* GGTT */ | 2;
+ bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
bb->cs[bb->len++] = offset + flex->offset * sizeof(u32);
bb->cs[bb->len++] = 0;
bb->cs[bb->len++] = flex->value;
diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c
index 4ee32ee1cc88..722278cc23fc 100644
--- a/drivers/gpu/drm/xe/xe_pat.c
+++ b/drivers/gpu/drm/xe/xe_pat.c
@@ -7,6 +7,8 @@
#include <drm/xe_drm.h>
+#include <generated/xe_wa_oob.h>
+
#include "regs/xe_reg_defs.h"
#include "xe_assert.h"
#include "xe_device.h"
@@ -15,6 +17,7 @@
#include "xe_gt_mcr.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
+#include "xe_wa.h"
#define _PAT_ATS 0x47fc
#define _PAT_INDEX(index) _PICK_EVEN_2RANGES(index, 8, \
@@ -382,7 +385,13 @@ void xe_pat_init_early(struct xe_device *xe)
if (GRAPHICS_VER(xe) == 20) {
xe->pat.ops = &xe2_pat_ops;
xe->pat.table = xe2_pat_table;
- xe->pat.n_entries = ARRAY_SIZE(xe2_pat_table);
+
+ /* Wa_16023588340. XXX: Should use XE_WA */
+ if (GRAPHICS_VERx100(xe) == 2001)
+ xe->pat.n_entries = 28; /* Disable CLOS3 */
+ else
+ xe->pat.n_entries = ARRAY_SIZE(xe2_pat_table);
+
xe->pat.idx[XE_CACHE_NONE] = 3;
xe->pat.idx[XE_CACHE_WT] = 15;
xe->pat.idx[XE_CACHE_WB] = 2;
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 732ee0d02124..3c4a3c91377a 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -59,6 +59,7 @@ struct xe_device_desc {
u8 has_display:1;
u8 has_heci_gscfi:1;
+ u8 has_heci_cscfi:1;
u8 has_llc:1;
u8 has_mmio_ext:1;
u8 has_sriov:1;
@@ -345,6 +346,7 @@ static const struct xe_device_desc bmg_desc = {
PLATFORM(BATTLEMAGE),
.has_display = true,
.require_force_probe = true,
+ .has_heci_cscfi = 1,
};
#undef PLATFORM
@@ -606,6 +608,7 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.is_dgfx = desc->is_dgfx;
xe->info.has_heci_gscfi = desc->has_heci_gscfi;
+ xe->info.has_heci_cscfi = desc->has_heci_cscfi;
xe->info.has_llc = desc->has_llc;
xe->info.has_mmio_ext = desc->has_mmio_ext;
xe->info.has_sriov = desc->has_sriov;
@@ -815,7 +818,7 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d",
+ drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d cscfi:%d",
desc->platform_name,
subplatform_desc ? subplatform_desc->name : "",
xe->info.devid, xe->info.revid,
@@ -828,7 +831,7 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
xe->info.media_verx100 % 100,
str_yes_no(xe->info.enable_display),
xe->info.dma_mask_size, xe->info.tile_count,
- xe->info.has_heci_gscfi);
+ xe->info.has_heci_gscfi, xe->info.has_heci_cscfi);
drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, D:%s, B:%s)\n",
xe_step_name(xe->info.step.graphics),
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index de3b5df65e48..9f3c14fd9f33 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -20,6 +20,7 @@
#include "xe_guc.h"
#include "xe_irq.h"
#include "xe_pcode.h"
+#include "xe_trace.h"
#include "xe_wa.h"
/**
@@ -87,6 +88,7 @@ int xe_pm_suspend(struct xe_device *xe)
int err;
drm_dbg(&xe->drm, "Suspending device\n");
+ trace_xe_pm_suspend(xe, __builtin_return_address(0));
for_each_gt(gt, xe, id)
xe_gt_suspend_prepare(gt);
@@ -131,6 +133,7 @@ int xe_pm_resume(struct xe_device *xe)
int err;
drm_dbg(&xe->drm, "Resuming device\n");
+ trace_xe_pm_resume(xe, __builtin_return_address(0));
for_each_tile(tile, xe, id)
xe_wa_apply_tile_workarounds(tile);
@@ -326,6 +329,7 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
u8 id;
int err = 0;
+ trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0));
/* Disable access_ongoing asserts and prevent recursive pm calls */
xe_pm_write_callback_task(xe, current);
@@ -399,6 +403,7 @@ int xe_pm_runtime_resume(struct xe_device *xe)
u8 id;
int err = 0;
+ trace_xe_pm_runtime_resume(xe, __builtin_return_address(0));
/* Disable access_ongoing asserts and prevent recursive pm calls */
xe_pm_write_callback_task(xe, current);
@@ -463,6 +468,7 @@ static void pm_runtime_lockdep_prime(void)
*/
void xe_pm_runtime_get(struct xe_device *xe)
{
+ trace_xe_pm_runtime_get(xe, __builtin_return_address(0));
pm_runtime_get_noresume(xe->drm.dev);
if (xe_pm_read_callback_task(xe) == current)
@@ -478,6 +484,7 @@ void xe_pm_runtime_get(struct xe_device *xe)
*/
void xe_pm_runtime_put(struct xe_device *xe)
{
+ trace_xe_pm_runtime_put(xe, __builtin_return_address(0));
if (xe_pm_read_callback_task(xe) == current) {
pm_runtime_put_noidle(xe->drm.dev);
} else {
@@ -495,6 +502,7 @@ void xe_pm_runtime_put(struct xe_device *xe)
*/
int xe_pm_runtime_get_ioctl(struct xe_device *xe)
{
+ trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0));
if (WARN_ON(xe_pm_read_callback_task(xe) == current))
return -ELOOP;
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
index e8b8ae5c6485..56e709d2fb30 100644
--- a/drivers/gpu/drm/xe/xe_preempt_fence.c
+++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
@@ -17,10 +17,16 @@ static void preempt_fence_work_func(struct work_struct *w)
container_of(w, typeof(*pfence), preempt_work);
struct xe_exec_queue *q = pfence->q;
- if (pfence->error)
+ if (pfence->error) {
dma_fence_set_error(&pfence->base, pfence->error);
- else
- q->ops->suspend_wait(q);
+ } else if (!q->ops->reset_status(q)) {
+ int err = q->ops->suspend_wait(q);
+
+ if (err)
+ dma_fence_set_error(&pfence->base, err);
+ } else {
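+ /* Queue has been reset: nothing to suspend, report -ENOENT on the fence */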
+ dma_fence_set_error(&pfence->base, -ENOENT);
+ }
dma_fence_signal(&pfence->base);
/*
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index ade9e7a3a0ad..97a6a0b0b8ba 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -9,12 +9,15 @@
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_drm_client.h"
+#include "xe_exec_queue.h"
#include "xe_gt.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_pt_types.h"
#include "xe_pt_walk.h"
#include "xe_res_cursor.h"
+#include "xe_sched_job.h"
+#include "xe_sync.h"
#include "xe_trace.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"
@@ -325,6 +328,7 @@ xe_pt_new_shared(struct xe_walk_update *wupd, struct xe_pt *parent,
entry->pt = parent;
entry->flags = 0;
entry->qwords = 0;
+ entry->pt_bo->update_index = -1;
if (alloc_entries) {
entry->pt_entries = kmalloc_array(XE_PDES,
@@ -842,19 +846,27 @@ xe_vm_populate_pgtable(struct xe_migrate_pt_update *pt_update, struct xe_tile *t
}
}
-static void xe_pt_abort_bind(struct xe_vma *vma,
- struct xe_vm_pgtable_update *entries,
- u32 num_entries)
+static void xe_pt_cancel_bind(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries)
{
u32 i, j;
for (i = 0; i < num_entries; i++) {
- if (!entries[i].pt_entries)
+ struct xe_pt *pt = entries[i].pt;
+
+ if (!pt)
continue;
- for (j = 0; j < entries[i].qwords; j++)
- xe_pt_destroy(entries[i].pt_entries[j].pt, xe_vma_vm(vma)->flags, NULL);
+ if (pt->level) {
+ for (j = 0; j < entries[i].qwords; j++)
+ xe_pt_destroy(entries[i].pt_entries[j].pt,
+ xe_vma_vm(vma)->flags, NULL);
+ }
+
kfree(entries[i].pt_entries);
+ entries[i].pt_entries = NULL;
+ entries[i].qwords = 0;
}
}
@@ -864,18 +876,15 @@ static void xe_pt_commit_locks_assert(struct xe_vma *vma)
lockdep_assert_held(&vm->lock);
- if (xe_vma_is_userptr(vma))
- lockdep_assert_held_read(&vm->userptr.notifier_lock);
- else if (!xe_vma_is_null(vma))
+ if (!xe_vma_is_userptr(vma) && !xe_vma_is_null(vma))
dma_resv_assert_held(xe_vma_bo(vma)->ttm.base.resv);
xe_vm_assert_held(vm);
}
-static void xe_pt_commit_bind(struct xe_vma *vma,
- struct xe_vm_pgtable_update *entries,
- u32 num_entries, bool rebind,
- struct llist_head *deferred)
+static void xe_pt_commit(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries, struct llist_head *deferred)
{
u32 i, j;
@@ -883,31 +892,90 @@ static void xe_pt_commit_bind(struct xe_vma *vma,
for (i = 0; i < num_entries; i++) {
struct xe_pt *pt = entries[i].pt;
+
+ if (!pt->level)
+ continue;
+
+ for (j = 0; j < entries[i].qwords; j++) {
+ struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
+
+ xe_pt_destroy(oldpte, xe_vma_vm(vma)->flags, deferred);
+ }
+ }
+}
+
+static void xe_pt_abort_bind(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries, bool rebind)
+{
+ int i, j;
+
+ xe_pt_commit_locks_assert(vma);
+
+ for (i = num_entries - 1; i >= 0; --i) {
+ struct xe_pt *pt = entries[i].pt;
struct xe_pt_dir *pt_dir;
if (!rebind)
- pt->num_live += entries[i].qwords;
+ pt->num_live -= entries[i].qwords;
- if (!pt->level) {
- kfree(entries[i].pt_entries);
+ if (!pt->level)
continue;
+
+ pt_dir = as_xe_pt_dir(pt);
+ for (j = 0; j < entries[i].qwords; j++) {
+ u32 j_ = j + entries[i].ofs;
+ struct xe_pt *newpte = xe_pt_entry(pt_dir, j_);
+ struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
+
+ pt_dir->children[j_] = oldpte ? &oldpte->base : 0;
+ xe_pt_destroy(newpte, xe_vma_vm(vma)->flags, NULL);
}
+ }
+}
+
+static void xe_pt_commit_prepare_bind(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries, bool rebind)
+{
+ u32 i, j;
+
+ xe_pt_commit_locks_assert(vma);
+
+ for (i = 0; i < num_entries; i++) {
+ struct xe_pt *pt = entries[i].pt;
+ struct xe_pt_dir *pt_dir;
+
+ if (!rebind)
+ pt->num_live += entries[i].qwords;
+
+ if (!pt->level)
+ continue;
pt_dir = as_xe_pt_dir(pt);
for (j = 0; j < entries[i].qwords; j++) {
u32 j_ = j + entries[i].ofs;
struct xe_pt *newpte = entries[i].pt_entries[j].pt;
+ struct xe_pt *oldpte = NULL;
if (xe_pt_entry(pt_dir, j_))
- xe_pt_destroy(xe_pt_entry(pt_dir, j_),
- xe_vma_vm(vma)->flags, deferred);
+ oldpte = xe_pt_entry(pt_dir, j_);
pt_dir->children[j_] = &newpte->base;
+ entries[i].pt_entries[j].pt = oldpte;
}
- kfree(entries[i].pt_entries);
}
}
+static void xe_pt_free_bind(struct xe_vm_pgtable_update *entries,
+ u32 num_entries)
+{
+ u32 i;
+
+ for (i = 0; i < num_entries; i++)
+ kfree(entries[i].pt_entries);
+}
+
static int
xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
struct xe_vm_pgtable_update *entries, u32 *num_entries)
@@ -918,20 +986,19 @@ xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
err = xe_pt_stage_bind(tile, vma, entries, num_entries);
if (!err)
xe_tile_assert(tile, *num_entries);
- else /* abort! */
- xe_pt_abort_bind(vma, entries, *num_entries);
return err;
}
static void xe_vm_dbg_print_entries(struct xe_device *xe,
const struct xe_vm_pgtable_update *entries,
- unsigned int num_entries)
+ unsigned int num_entries, bool bind)
#if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM))
{
unsigned int i;
- vm_dbg(&xe->drm, "%u entries to update\n", num_entries);
+ vm_dbg(&xe->drm, "%s: %u entries to update\n", bind ? "bind" : "unbind",
+ num_entries);
for (i = 0; i < num_entries; i++) {
const struct xe_vm_pgtable_update *entry = &entries[i];
struct xe_pt *xe_pt = entry->pt;
@@ -952,66 +1019,108 @@ static void xe_vm_dbg_print_entries(struct xe_device *xe,
{}
#endif
-#ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
-
-static int xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
+static bool no_in_syncs(struct xe_sync_entry *syncs, u32 num_syncs)
{
- u32 divisor = uvma->userptr.divisor ? uvma->userptr.divisor : 2;
- static u32 count;
+ int i;
- if (count++ % divisor == divisor - 1) {
- struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+ for (i = 0; i < num_syncs; i++) {
+ struct dma_fence *fence = syncs[i].fence;
- uvma->userptr.divisor = divisor << 1;
- spin_lock(&vm->userptr.invalidated_lock);
- list_move_tail(&uvma->userptr.invalidate_link,
- &vm->userptr.invalidated);
- spin_unlock(&vm->userptr.invalidated_lock);
- return true;
+ if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &fence->flags))
+ return false;
}
- return false;
+ return true;
}
-#else
+static int job_test_add_deps(struct xe_sched_job *job,
+ struct dma_resv *resv,
+ enum dma_resv_usage usage)
+{
+ if (!job) {
+ if (!dma_resv_test_signaled(resv, usage))
+ return -ETIME;
-static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
+ return 0;
+ }
+
+ return xe_sched_job_add_deps(job, resv, usage);
+}
+
+static int vma_add_deps(struct xe_vma *vma, struct xe_sched_job *job)
{
- return false;
+ struct xe_bo *bo = xe_vma_bo(vma);
+
+ xe_bo_assert_held(bo);
+
+ if (bo && !bo->vm)
+ return job_test_add_deps(job, bo->ttm.base.resv,
+ DMA_RESV_USAGE_KERNEL);
+
+ return 0;
}
-#endif
+static int op_add_deps(struct xe_vm *vm, struct xe_vma_op *op,
+ struct xe_sched_job *job)
+{
+ int err = 0;
-/**
- * struct xe_pt_migrate_pt_update - Callback argument for pre-commit callbacks
- * @base: Base we derive from.
- * @bind: Whether this is a bind or an unbind operation. A bind operation
- * makes the pre-commit callback error with -EAGAIN if it detects a
- * pending invalidation.
- * @locked: Whether the pre-commit callback locked the userptr notifier lock
- * and it needs unlocking.
- */
-struct xe_pt_migrate_pt_update {
- struct xe_migrate_pt_update base;
- bool bind;
- bool locked;
-};
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+ break;
+
+ err = vma_add_deps(op->map.vma, job);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ if (op->remap.prev)
+ err = vma_add_deps(op->remap.prev, job);
+ if (!err && op->remap.next)
+ err = vma_add_deps(op->remap.next, job);
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ err = vma_add_deps(gpuva_to_vma(op->base.prefetch.va), job);
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+ }
+
+ return err;
+}
-/*
- * This function adds the needed dependencies to a page-table update job
- * to make sure racing jobs for separate bind engines don't race writing
- * to the same page-table range, wreaking havoc. Initially use a single
- * fence for the entire VM. An optimization would use smaller granularity.
- */
static int xe_pt_vm_dependencies(struct xe_sched_job *job,
- struct xe_range_fence_tree *rftree,
- u64 start, u64 last)
+ struct xe_vm *vm,
+ struct xe_vma_ops *vops,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_range_fence_tree *rftree)
{
struct xe_range_fence *rtfence;
struct dma_fence *fence;
- int err;
+ struct xe_vma_op *op;
+ int err = 0, i;
+
+ xe_vm_assert_held(vm);
- rtfence = xe_range_fence_tree_first(rftree, start, last);
+ if (!job && !no_in_syncs(vops->syncs, vops->num_syncs))
+ return -ETIME;
+
+ if (!job && !xe_exec_queue_is_idle(pt_update_ops->q))
+ return -ETIME;
+
+ if (pt_update_ops->wait_vm_bookkeep || pt_update_ops->wait_vm_kernel) {
+ err = job_test_add_deps(job, xe_vm_resv(vm),
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_BOOKKEEP :
+ DMA_RESV_USAGE_KERNEL);
+ if (err)
+ return err;
+ }
+
+ rtfence = xe_range_fence_tree_first(rftree, pt_update_ops->start,
+ pt_update_ops->last);
while (rtfence) {
fence = rtfence->fence;
@@ -1029,80 +1138,173 @@ static int xe_pt_vm_dependencies(struct xe_sched_job *job,
return err;
}
- rtfence = xe_range_fence_tree_next(rtfence, start, last);
+ rtfence = xe_range_fence_tree_next(rtfence,
+ pt_update_ops->start,
+ pt_update_ops->last);
}
- return 0;
+ list_for_each_entry(op, &vops->list, link) {
+ err = op_add_deps(vm, op, job);
+ if (err)
+ return err;
+ }
+
+ if (job)
+ err = xe_sched_job_last_fence_add_dep(job, vm);
+ else
+ err = xe_exec_queue_last_fence_test_dep(pt_update_ops->q, vm);
+
+ for (i = 0; job && !err && i < vops->num_syncs; i++)
+ err = xe_sync_entry_add_deps(&vops->syncs[i], job);
+
+ return err;
}
static int xe_pt_pre_commit(struct xe_migrate_pt_update *pt_update)
{
- struct xe_range_fence_tree *rftree =
- &xe_vma_vm(pt_update->vma)->rftree[pt_update->tile_id];
+ struct xe_vma_ops *vops = pt_update->vops;
+ struct xe_vm *vm = vops->vm;
+ struct xe_range_fence_tree *rftree = &vm->rftree[pt_update->tile_id];
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[pt_update->tile_id];
+
+ return xe_pt_vm_dependencies(pt_update->job, vm, pt_update->vops,
+ pt_update_ops, rftree);
+}
- return xe_pt_vm_dependencies(pt_update->job, rftree,
- pt_update->start, pt_update->last);
+#ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
+
+static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
+{
+ u32 divisor = uvma->userptr.divisor ? uvma->userptr.divisor : 2;
+ static u32 count;
+
+ if (count++ % divisor == divisor - 1) {
+ uvma->userptr.divisor = divisor << 1;
+ return true;
+ }
+
+ return false;
}
-static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
+#else
+
+static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
{
- struct xe_pt_migrate_pt_update *userptr_update =
- container_of(pt_update, typeof(*userptr_update), base);
- struct xe_userptr_vma *uvma = to_userptr_vma(pt_update->vma);
- unsigned long notifier_seq = uvma->userptr.notifier_seq;
- struct xe_vm *vm = xe_vma_vm(&uvma->vma);
- int err = xe_pt_vm_dependencies(pt_update->job,
- &vm->rftree[pt_update->tile_id],
- pt_update->start,
- pt_update->last);
+ return false;
+}
- if (err)
- return err;
+#endif
- userptr_update->locked = false;
+static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_vm_pgtable_update_ops *pt_update)
+{
+ struct xe_userptr_vma *uvma;
+ unsigned long notifier_seq;
- /*
- * Wait until nobody is running the invalidation notifier, and
- * since we're exiting the loop holding the notifier lock,
- * nobody can proceed invalidating either.
- *
- * Note that we don't update the vma->userptr.notifier_seq since
- * we don't update the userptr pages.
- */
- do {
- down_read(&vm->userptr.notifier_lock);
- if (!mmu_interval_read_retry(&uvma->userptr.notifier,
- notifier_seq))
- break;
+ lockdep_assert_held_read(&vm->userptr.notifier_lock);
- up_read(&vm->userptr.notifier_lock);
+ if (!xe_vma_is_userptr(vma))
+ return 0;
- if (userptr_update->bind)
- return -EAGAIN;
+ uvma = to_userptr_vma(vma);
+ notifier_seq = uvma->userptr.notifier_seq;
- notifier_seq = mmu_interval_read_begin(&uvma->userptr.notifier);
- } while (true);
+ if (uvma->userptr.initial_bind && !xe_vm_in_fault_mode(vm))
+ return 0;
- /* Inject errors to test_whether they are handled correctly */
- if (userptr_update->bind && xe_pt_userptr_inject_eagain(uvma)) {
- up_read(&vm->userptr.notifier_lock);
+ if (!mmu_interval_read_retry(&uvma->userptr.notifier,
+ notifier_seq) &&
+ !xe_pt_userptr_inject_eagain(uvma))
+ return 0;
+
+ if (xe_vm_in_fault_mode(vm)) {
return -EAGAIN;
- }
+ } else {
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_move_tail(&uvma->userptr.invalidate_link,
+ &vm->userptr.invalidated);
+ spin_unlock(&vm->userptr.invalidated_lock);
- userptr_update->locked = true;
+ if (xe_vm_in_preempt_fence_mode(vm)) {
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+ long err;
+
+ dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP);
+ dma_resv_for_each_fence_unlocked(&cursor, fence)
+ dma_fence_enable_sw_signaling(fence);
+ dma_resv_iter_end(&cursor);
+
+ err = dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+ XE_WARN_ON(err <= 0);
+ }
+ }
return 0;
}
-static const struct xe_migrate_pt_update_ops bind_ops = {
- .populate = xe_vm_populate_pgtable,
- .pre_commit = xe_pt_pre_commit,
-};
+static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
+ struct xe_vm_pgtable_update_ops *pt_update)
+{
+ int err = 0;
-static const struct xe_migrate_pt_update_ops userptr_bind_ops = {
- .populate = xe_vm_populate_pgtable,
- .pre_commit = xe_pt_userptr_pre_commit,
-};
+ lockdep_assert_held_read(&vm->userptr.notifier_lock);
+
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+ break;
+
+ err = vma_check_userptr(vm, op->map.vma, pt_update);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ if (op->remap.prev)
+ err = vma_check_userptr(vm, op->remap.prev, pt_update);
+ if (!err && op->remap.next)
+ err = vma_check_userptr(vm, op->remap.next, pt_update);
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va),
+ pt_update);
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+ }
+
+ return err;
+}
+
+static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
+{
+ struct xe_vm *vm = pt_update->vops->vm;
+ struct xe_vma_ops *vops = pt_update->vops;
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[pt_update->tile_id];
+ struct xe_vma_op *op;
+ int err;
+
+ err = xe_pt_pre_commit(pt_update);
+ if (err)
+ return err;
+
+ down_read(&vm->userptr.notifier_lock);
+
+ list_for_each_entry(op, &vops->list, link) {
+ err = op_check_userptr(vm, op, pt_update_ops);
+ if (err) {
+ up_read(&vm->userptr.notifier_lock);
+ break;
+ }
+ }
+
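+ /*
+ * On success the notifier lock is left held on purpose; it is released
+ * after the page-table commit in xe_pt_update_ops_run() (gated by
+ * pt_update_ops->needs_userptr_lock).
+ */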
+ return err;
+}
struct invalidation_fence {
struct xe_gt_tlb_invalidation_fence base;
@@ -1115,23 +1317,6 @@ struct invalidation_fence {
u32 asid;
};
-static const char *
-invalidation_fence_get_driver_name(struct dma_fence *dma_fence)
-{
- return "xe";
-}
-
-static const char *
-invalidation_fence_get_timeline_name(struct dma_fence *dma_fence)
-{
- return "invalidation_fence";
-}
-
-static const struct dma_fence_ops invalidation_fence_ops = {
- .get_driver_name = invalidation_fence_get_driver_name,
- .get_timeline_name = invalidation_fence_get_timeline_name,
-};
-
static void invalidation_fence_cb(struct dma_fence *fence,
struct dma_fence_cb *cb)
{
@@ -1161,24 +1346,17 @@ static void invalidation_fence_work_func(struct work_struct *w)
ifence->end, ifence->asid);
}
-static int invalidation_fence_init(struct xe_gt *gt,
- struct invalidation_fence *ifence,
- struct dma_fence *fence,
- u64 start, u64 end, u32 asid)
+static void invalidation_fence_init(struct xe_gt *gt,
+ struct invalidation_fence *ifence,
+ struct dma_fence *fence,
+ u64 start, u64 end, u32 asid)
{
int ret;
trace_xe_gt_tlb_invalidation_fence_create(gt_to_xe(gt), &ifence->base);
- spin_lock_irq(&gt->tlb_invalidation.lock);
- dma_fence_init(&ifence->base.base, &invalidation_fence_ops,
- &gt->tlb_invalidation.lock,
- dma_fence_context_alloc(1), 1);
- spin_unlock_irq(&gt->tlb_invalidation.lock);
-
- INIT_LIST_HEAD(&ifence->base.link);
+ xe_gt_tlb_invalidation_fence_init(gt, &ifence->base, false);
- dma_fence_get(&ifence->base.base); /* Ref for caller */
ifence->fence = fence;
ifence->gt = gt;
ifence->start = start;
@@ -1196,192 +1374,6 @@ static int invalidation_fence_init(struct xe_gt *gt,
}
xe_gt_assert(gt, !ret || ret == -ENOENT);
-
- return ret && ret != -ENOENT ? ret : 0;
-}
-
-static void xe_pt_calc_rfence_interval(struct xe_vma *vma,
- struct xe_pt_migrate_pt_update *update,
- struct xe_vm_pgtable_update *entries,
- u32 num_entries)
-{
- int i, level = 0;
-
- for (i = 0; i < num_entries; i++) {
- const struct xe_vm_pgtable_update *entry = &entries[i];
-
- if (entry->pt->level > level)
- level = entry->pt->level;
- }
-
- /* Greedy (non-optimal) calculation but simple */
- update->base.start = ALIGN_DOWN(xe_vma_start(vma),
- 0x1ull << xe_pt_shift(level));
- update->base.last = ALIGN(xe_vma_end(vma),
- 0x1ull << xe_pt_shift(level)) - 1;
-}
-
-/**
- * __xe_pt_bind_vma() - Build and connect a page-table tree for the vma
- * address range.
- * @tile: The tile to bind for.
- * @vma: The vma to bind.
- * @q: The exec_queue with which to do pipelined page-table updates.
- * @syncs: Entries to sync on before binding the built tree to the live vm tree.
- * @num_syncs: Number of @sync entries.
- * @rebind: Whether we're rebinding this vma to the same address range without
- * an unbind in-between.
- *
- * This function builds a page-table tree (see xe_pt_stage_bind() for more
- * information on page-table building), and the xe_vm_pgtable_update entries
- * abstracting the operations needed to attach it to the main vm tree. It
- * then takes the relevant locks and updates the metadata side of the main
- * vm tree and submits the operations for pipelined attachment of the
- * gpu page-table to the vm main tree, (which can be done either by the
- * cpu and the GPU).
- *
- * Return: A valid dma-fence representing the pipelined attachment operation
- * on success, an error pointer on error.
- */
-struct dma_fence *
-__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
- struct xe_sync_entry *syncs, u32 num_syncs,
- bool rebind)
-{
- struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
- struct xe_pt_migrate_pt_update bind_pt_update = {
- .base = {
- .ops = xe_vma_is_userptr(vma) ? &userptr_bind_ops : &bind_ops,
- .vma = vma,
- .tile_id = tile->id,
- },
- .bind = true,
- };
- struct xe_vm *vm = xe_vma_vm(vma);
- u32 num_entries;
- struct dma_fence *fence;
- struct invalidation_fence *ifence = NULL;
- struct xe_range_fence *rfence;
- int err;
-
- bind_pt_update.locked = false;
- xe_bo_assert_held(xe_vma_bo(vma));
- xe_vm_assert_held(vm);
-
- vm_dbg(&xe_vma_vm(vma)->xe->drm,
- "Preparing bind, with range [%llx...%llx) engine %p.\n",
- xe_vma_start(vma), xe_vma_end(vma), q);
-
- err = xe_pt_prepare_bind(tile, vma, entries, &num_entries);
- if (err)
- goto err;
-
- err = dma_resv_reserve_fences(xe_vm_resv(vm), 1);
- if (!err && !xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
- err = dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, 1);
- if (err)
- goto err;
-
- xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries));
-
- xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries);
- xe_pt_calc_rfence_interval(vma, &bind_pt_update, entries,
- num_entries);
-
- /*
- * If rebind, we have to invalidate TLB on !LR vms to invalidate
- * cached PTEs point to freed memory. on LR vms this is done
- * automatically when the context is re-enabled by the rebind worker,
- * or in fault mode it was invalidated on PTE zapping.
- *
- * If !rebind, and scratch enabled VMs, there is a chance the scratch
- * PTE is already cached in the TLB so it needs to be invalidated.
- * on !LR VMs this is done in the ring ops preceding a batch, but on
- * non-faulting LR, in particular on user-space batch buffer chaining,
- * it needs to be done here.
- */
- if ((!rebind && xe_vm_has_scratch(vm) && xe_vm_in_preempt_fence_mode(vm))) {
- ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
- if (!ifence)
- return ERR_PTR(-ENOMEM);
- } else if (rebind && !xe_vm_in_lr_mode(vm)) {
- /* We bump also if batch_invalidate_tlb is true */
- vm->tlb_flush_seqno++;
- }
-
- rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
- if (!rfence) {
- kfree(ifence);
- return ERR_PTR(-ENOMEM);
- }
-
- fence = xe_migrate_update_pgtables(tile->migrate,
- vm, xe_vma_bo(vma), q,
- entries, num_entries,
- syncs, num_syncs,
- &bind_pt_update.base);
- if (!IS_ERR(fence)) {
- bool last_munmap_rebind = vma->gpuva.flags & XE_VMA_LAST_REBIND;
- LLIST_HEAD(deferred);
- int err;
-
- err = xe_range_fence_insert(&vm->rftree[tile->id], rfence,
- &xe_range_fence_kfree_ops,
- bind_pt_update.base.start,
- bind_pt_update.base.last, fence);
- if (err)
- dma_fence_wait(fence, false);
-
- /* TLB invalidation must be done before signaling rebind */
- if (ifence) {
- int err = invalidation_fence_init(tile->primary_gt,
- ifence, fence,
- xe_vma_start(vma),
- xe_vma_end(vma),
- xe_vma_vm(vma)->usm.asid);
- if (err) {
- dma_fence_put(fence);
- kfree(ifence);
- return ERR_PTR(err);
- }
- fence = &ifence->base.base;
- }
-
- /* add shared fence now for pagetable delayed destroy */
- dma_resv_add_fence(xe_vm_resv(vm), fence, rebind ||
- last_munmap_rebind ?
- DMA_RESV_USAGE_KERNEL :
- DMA_RESV_USAGE_BOOKKEEP);
-
- if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
- dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
- DMA_RESV_USAGE_BOOKKEEP);
- xe_pt_commit_bind(vma, entries, num_entries, rebind,
- bind_pt_update.locked ? &deferred : NULL);
-
- /* This vma is live (again?) now */
- vma->tile_present |= BIT(tile->id);
-
- if (bind_pt_update.locked) {
- to_userptr_vma(vma)->userptr.initial_bind = true;
- up_read(&vm->userptr.notifier_lock);
- xe_bo_put_commit(&deferred);
- }
- if (!rebind && last_munmap_rebind &&
- xe_vm_in_preempt_fence_mode(vm))
- xe_vm_queue_rebind_worker(vm);
- } else {
- kfree(rfence);
- kfree(ifence);
- if (bind_pt_update.locked)
- up_read(&vm->userptr.notifier_lock);
- xe_pt_abort_bind(vma, entries, num_entries);
- }
-
- return fence;
-
-err:
- return ERR_PTR(err);
}
struct xe_pt_stage_unbind_walk {
@@ -1466,6 +1458,7 @@ xe_pt_stage_unbind_post_descend(struct xe_ptw *parent, pgoff_t offset,
struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
pgoff_t end_offset;
u64 size = 1ull << walk->shifts[--level];
+ int err;
if (!IS_ALIGNED(addr, size))
addr = xe_walk->modified_start;
@@ -1481,7 +1474,10 @@ xe_pt_stage_unbind_post_descend(struct xe_ptw *parent, pgoff_t offset,
&end_offset))
return 0;
- (void)xe_pt_new_shared(&xe_walk->wupd, xe_child, offset, false);
+ err = xe_pt_new_shared(&xe_walk->wupd, xe_child, offset, true);
+ if (err)
+ return err;
+
xe_walk->wupd.updates[level].update->qwords = end_offset - offset;
return 0;
@@ -1534,8 +1530,8 @@ xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update,
void *ptr, u32 qword_ofs, u32 num_qwords,
const struct xe_vm_pgtable_update *update)
{
- struct xe_vma *vma = pt_update->vma;
- u64 empty = __xe_pt_empty_pte(tile, xe_vma_vm(vma), update->pt->level);
+ struct xe_vm *vm = pt_update->vops->vm;
+ u64 empty = __xe_pt_empty_pte(tile, vm, update->pt->level);
int i;
if (map && map->is_iomem)
@@ -1549,181 +1545,571 @@ xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update,
memset64(ptr, empty, num_qwords);
}
+static void xe_pt_abort_unbind(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries)
+{
+ int i, j;
+
+ xe_pt_commit_locks_assert(vma);
+
+ for (i = num_entries - 1; i >= 0; --i) {
+ struct xe_vm_pgtable_update *entry = &entries[i];
+ struct xe_pt *pt = entry->pt;
+ struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
+
+ pt->num_live += entry->qwords;
+
+ if (!pt->level)
+ continue;
+
+ for (j = entry->ofs; j < entry->ofs + entry->qwords; j++)
+ pt_dir->children[j] =
+ entries[i].pt_entries[j - entry->ofs].pt ?
+ &entries[i].pt_entries[j - entry->ofs].pt->base : NULL;
+ }
+}
+
static void
-xe_pt_commit_unbind(struct xe_vma *vma,
- struct xe_vm_pgtable_update *entries, u32 num_entries,
- struct llist_head *deferred)
+xe_pt_commit_prepare_unbind(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries)
{
- u32 j;
+ int i, j;
xe_pt_commit_locks_assert(vma);
- for (j = 0; j < num_entries; ++j) {
- struct xe_vm_pgtable_update *entry = &entries[j];
+ for (i = 0; i < num_entries; ++i) {
+ struct xe_vm_pgtable_update *entry = &entries[i];
struct xe_pt *pt = entry->pt;
+ struct xe_pt_dir *pt_dir;
pt->num_live -= entry->qwords;
- if (pt->level) {
- struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
- u32 i;
+ if (!pt->level)
+ continue;
+
+ pt_dir = as_xe_pt_dir(pt);
+ for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) {
+ entry->pt_entries[j - entry->ofs].pt =
+ xe_pt_entry(pt_dir, j);
+ pt_dir->children[j] = NULL;
+ }
+ }
+}
+
+static void
+xe_pt_update_ops_rfence_interval(struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma *vma)
+{
+ u32 current_op = pt_update_ops->current_op;
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
+ int i, level = 0;
+ u64 start, last;
- for (i = entry->ofs; i < entry->ofs + entry->qwords;
- i++) {
- if (xe_pt_entry(pt_dir, i))
- xe_pt_destroy(xe_pt_entry(pt_dir, i),
- xe_vma_vm(vma)->flags, deferred);
+ for (i = 0; i < pt_op->num_entries; i++) {
+ const struct xe_vm_pgtable_update *entry = &pt_op->entries[i];
- pt_dir->children[i] = NULL;
- }
+ if (entry->pt->level > level)
+ level = entry->pt->level;
+ }
+
+ /* Greedy (non-optimal) calculation but simple */
+ start = ALIGN_DOWN(xe_vma_start(vma), 0x1ull << xe_pt_shift(level));
+ last = ALIGN(xe_vma_end(vma), 0x1ull << xe_pt_shift(level)) - 1;
+
+ if (start < pt_update_ops->start)
+ pt_update_ops->start = start;
+ if (last > pt_update_ops->last)
+ pt_update_ops->last = last;
+}
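[Editor's note] Worked example, assuming xe_pt_shift(1) == 21 (2 MiB granularity at level 1): for a VMA spanning [0x01201000, 0x01203000) whose highest touched entry is at level 1, start becomes ALIGN_DOWN(0x01201000, 2M) = 0x01200000 and last becomes ALIGN(0x01203000, 2M) - 1 = 0x013fffff; the running pt_update_ops->start/last window is then widened to cover that interval.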
+
+static int vma_reserve_fences(struct xe_device *xe, struct xe_vma *vma)
+{
+ if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+ return dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv,
+ xe->info.tile_count);
+
+ return 0;
+}
+
+static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma *vma)
+{
+ u32 current_op = pt_update_ops->current_op;
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
+ int err;
+
+ xe_bo_assert_held(xe_vma_bo(vma));
+
+ vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ "Preparing bind, with range [%llx...%llx)\n",
+ xe_vma_start(vma), xe_vma_end(vma) - 1);
+
+ pt_op->vma = NULL;
+ pt_op->bind = true;
+ pt_op->rebind = BIT(tile->id) & vma->tile_present;
+
+ err = vma_reserve_fences(tile_to_xe(tile), vma);
+ if (err)
+ return err;
+
+ err = xe_pt_prepare_bind(tile, vma, pt_op->entries,
+ &pt_op->num_entries);
+ if (!err) {
+ xe_tile_assert(tile, pt_op->num_entries <=
+ ARRAY_SIZE(pt_op->entries));
+ xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
+ pt_op->num_entries, true);
+
+ xe_pt_update_ops_rfence_interval(pt_update_ops, vma);
+ ++pt_update_ops->current_op;
+ pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
+
+ /*
+ * If rebind, we have to invalidate TLB on !LR vms to invalidate
+ * cached PTEs point to freed memory. On LR vms this is done
+ * automatically when the context is re-enabled by the rebind worker,
+ * or in fault mode it was invalidated on PTE zapping.
+ *
+ * If !rebind, and scratch enabled VMs, there is a chance the scratch
+ * PTE is already cached in the TLB so it needs to be invalidated.
+ * On !LR VMs this is done in the ring ops preceding a batch, but on
+ * non-faulting LR, in particular on user-space batch buffer chaining,
+ * it needs to be done here.
+ */
+ if ((!pt_op->rebind && xe_vm_has_scratch(vm) &&
+ xe_vm_in_preempt_fence_mode(vm)))
+ pt_update_ops->needs_invalidation = true;
+ else if (pt_op->rebind && !xe_vm_in_lr_mode(vm))
+ /* We bump also if batch_invalidate_tlb is true */
+ vm->tlb_flush_seqno++;
+
+ vma->tile_staged |= BIT(tile->id);
+ pt_op->vma = vma;
+ xe_pt_commit_prepare_bind(vma, pt_op->entries,
+ pt_op->num_entries, pt_op->rebind);
+ } else {
+ xe_pt_cancel_bind(vma, pt_op->entries, pt_op->num_entries);
+ }
+
+ return err;
+}
+
+static int unbind_op_prepare(struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma *vma)
+{
+ u32 current_op = pt_update_ops->current_op;
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
+ int err;
+
+ if (!((vma->tile_present | vma->tile_staged) & BIT(tile->id)))
+ return 0;
+
+ xe_bo_assert_held(xe_vma_bo(vma));
+
+ vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ "Preparing unbind, with range [%llx...%llx)\n",
+ xe_vma_start(vma), xe_vma_end(vma) - 1);
+
+ /*
+ * Wait for invalidation to complete. Can corrupt internal page table
+ * state if an invalidation is running while preparing an unbind.
+ */
+ if (xe_vma_is_userptr(vma) && xe_vm_in_fault_mode(xe_vma_vm(vma)))
+ mmu_interval_read_begin(&to_userptr_vma(vma)->userptr.notifier);
+
+ pt_op->vma = vma;
+ pt_op->bind = false;
+ pt_op->rebind = false;
+
+ err = vma_reserve_fences(tile_to_xe(tile), vma);
+ if (err)
+ return err;
+
+ pt_op->num_entries = xe_pt_stage_unbind(tile, vma, pt_op->entries);
+
+ xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
+ pt_op->num_entries, false);
+ xe_pt_update_ops_rfence_interval(pt_update_ops, vma);
+ ++pt_update_ops->current_op;
+ pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
+ pt_update_ops->needs_invalidation = true;
+
+ xe_pt_commit_prepare_unbind(vma, pt_op->entries, pt_op->num_entries);
+
+ return 0;
+}
+
+static int op_prepare(struct xe_vm *vm,
+ struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma_op *op)
+{
+ int err = 0;
+
+ xe_vm_assert_held(vm);
+
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+ break;
+
+ err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma);
+ pt_update_ops->wait_vm_kernel = true;
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ err = unbind_op_prepare(tile, pt_update_ops,
+ gpuva_to_vma(op->base.remap.unmap->va));
+
+ if (!err && op->remap.prev) {
+ err = bind_op_prepare(vm, tile, pt_update_ops,
+ op->remap.prev);
+ pt_update_ops->wait_vm_bookkeep = true;
}
+ if (!err && op->remap.next) {
+ err = bind_op_prepare(vm, tile, pt_update_ops,
+ op->remap.next);
+ pt_update_ops->wait_vm_bookkeep = true;
+ }
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ err = unbind_op_prepare(tile, pt_update_ops,
+ gpuva_to_vma(op->base.unmap.va));
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ err = bind_op_prepare(vm, tile, pt_update_ops,
+ gpuva_to_vma(op->base.prefetch.va));
+ pt_update_ops->wait_vm_kernel = true;
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}
+
+ return err;
+}
+
+static void
+xe_pt_update_ops_init(struct xe_vm_pgtable_update_ops *pt_update_ops)
+{
+ init_llist_head(&pt_update_ops->deferred);
+ pt_update_ops->start = ~0x0ull;
+ pt_update_ops->last = 0x0ull;
}
-static const struct xe_migrate_pt_update_ops unbind_ops = {
- .populate = xe_migrate_clear_pgtable_callback,
+/**
+ * xe_pt_update_ops_prepare() - Prepare PT update operations
+ * @tile: Tile of PT update operations
+ * @vops: VMA operations
+ *
+ * Prepare PT update operations, which includes updating internal PT state,
+ * allocating memory for page tables, populating the page tables being pruned
+ * in, and creating PT update operations for leaf insertion / removal.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
+{
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[tile->id];
+ struct xe_vma_op *op;
+ int err;
+
+ lockdep_assert_held(&vops->vm->lock);
+ xe_vm_assert_held(vops->vm);
+
+ xe_pt_update_ops_init(pt_update_ops);
+
+ err = dma_resv_reserve_fences(xe_vm_resv(vops->vm),
+ tile_to_xe(tile)->info.tile_count);
+ if (err)
+ return err;
+
+ list_for_each_entry(op, &vops->list, link) {
+ err = op_prepare(vops->vm, tile, pt_update_ops, op);
+
+ if (err)
+ return err;
+ }
+
+ xe_tile_assert(tile, pt_update_ops->current_op <=
+ pt_update_ops->num_ops);
+
+#ifdef TEST_VM_OPS_ERROR
+ if (vops->inject_error &&
+ vops->vm->xe->vm_inject_error_position == FORCE_OP_ERROR_PREPARE)
+ return -ENOSPC;
+#endif
+
+ return 0;
+}
+
+static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma *vma, struct dma_fence *fence)
+{
+ if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+ dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
+ vma->tile_present |= BIT(tile->id);
+ vma->tile_staged &= ~BIT(tile->id);
+ if (xe_vma_is_userptr(vma)) {
+ lockdep_assert_held_read(&vm->userptr.notifier_lock);
+ to_userptr_vma(vma)->userptr.initial_bind = true;
+ }
+
+ /*
+ * Kick rebind worker if this bind triggers preempt fences and not in
+ * the rebind worker
+ */
+ if (pt_update_ops->wait_vm_bookkeep &&
+ xe_vm_in_preempt_fence_mode(vm) &&
+ !current->mm)
+ xe_vm_queue_rebind_worker(vm);
+}
+
+static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma *vma, struct dma_fence *fence)
+{
+ if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+ dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
+ vma->tile_present &= ~BIT(tile->id);
+ if (!vma->tile_present) {
+ list_del_init(&vma->combined_links.rebind);
+ if (xe_vma_is_userptr(vma)) {
+ lockdep_assert_held_read(&vm->userptr.notifier_lock);
+
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
+ spin_unlock(&vm->userptr.invalidated_lock);
+ }
+ }
+}
+
+static void op_commit(struct xe_vm *vm,
+ struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma_op *op, struct dma_fence *fence)
+{
+ xe_vm_assert_held(vm);
+
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+ break;
+
+ bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ unbind_op_commit(vm, tile, pt_update_ops,
+ gpuva_to_vma(op->base.remap.unmap->va), fence);
+
+ if (op->remap.prev)
+ bind_op_commit(vm, tile, pt_update_ops, op->remap.prev,
+ fence);
+ if (op->remap.next)
+ bind_op_commit(vm, tile, pt_update_ops, op->remap.next,
+ fence);
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ unbind_op_commit(vm, tile, pt_update_ops,
+ gpuva_to_vma(op->base.unmap.va), fence);
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ bind_op_commit(vm, tile, pt_update_ops,
+ gpuva_to_vma(op->base.prefetch.va), fence);
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+ }
+}
+
+static const struct xe_migrate_pt_update_ops migrate_ops = {
+ .populate = xe_vm_populate_pgtable,
+ .clear = xe_migrate_clear_pgtable_callback,
.pre_commit = xe_pt_pre_commit,
};
-static const struct xe_migrate_pt_update_ops userptr_unbind_ops = {
- .populate = xe_migrate_clear_pgtable_callback,
+static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
+ .populate = xe_vm_populate_pgtable,
+ .clear = xe_migrate_clear_pgtable_callback,
.pre_commit = xe_pt_userptr_pre_commit,
};
/**
- * __xe_pt_unbind_vma() - Disconnect and free a page-table tree for the vma
- * address range.
- * @tile: The tile to unbind for.
- * @vma: The vma to unbind.
- * @q: The exec_queue with which to do pipelined page-table updates.
- * @syncs: Entries to sync on before disconnecting the tree to be destroyed.
- * @num_syncs: Number of @sync entries.
+ * xe_pt_update_ops_run() - Run PT update operations
+ * @tile: Tile of PT update operations
+ * @vops: VMA operations
*
- * This function builds a the xe_vm_pgtable_update entries abstracting the
- * operations needed to detach the page-table tree to be destroyed from the
- * man vm tree.
- * It then takes the relevant locks and submits the operations for
- * pipelined detachment of the gpu page-table from the vm main tree,
- * (which can be done either by the cpu and the GPU), Finally it frees the
- * detached page-table tree.
+ * Run PT update operations, which includes committing internal PT state
+ * changes, creating a job for the PT update operations (leaf insertion /
+ * removal), and installing the job fence in the various places that need it.
*
- * Return: A valid dma-fence representing the pipelined detachment operation
- * on success, an error pointer on error.
+ * Return: fence on success, negative ERR_PTR on error.
*/
struct dma_fence *
-__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
- struct xe_sync_entry *syncs, u32 num_syncs)
+xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
{
- struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
- struct xe_pt_migrate_pt_update unbind_pt_update = {
- .base = {
- .ops = xe_vma_is_userptr(vma) ? &userptr_unbind_ops :
- &unbind_ops,
- .vma = vma,
- .tile_id = tile->id,
- },
- };
- struct xe_vm *vm = xe_vma_vm(vma);
- u32 num_entries;
- struct dma_fence *fence = NULL;
- struct invalidation_fence *ifence;
+ struct xe_vm *vm = vops->vm;
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[tile->id];
+ struct dma_fence *fence;
+ struct invalidation_fence *ifence = NULL;
struct xe_range_fence *rfence;
- int err;
-
- LLIST_HEAD(deferred);
+ struct xe_vma_op *op;
+ int err = 0, i;
+ struct xe_migrate_pt_update update = {
+ .ops = pt_update_ops->needs_userptr_lock ?
+ &userptr_migrate_ops :
+ &migrate_ops,
+ .vops = vops,
+ .tile_id = tile->id,
+ };
- xe_bo_assert_held(xe_vma_bo(vma));
+ lockdep_assert_held(&vm->lock);
xe_vm_assert_held(vm);
- vm_dbg(&xe_vma_vm(vma)->xe->drm,
- "Preparing unbind, with range [%llx...%llx) engine %p.\n",
- xe_vma_start(vma), xe_vma_end(vma), q);
-
- num_entries = xe_pt_stage_unbind(tile, vma, entries);
- xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries));
+ if (!pt_update_ops->current_op) {
+ xe_tile_assert(tile, xe_vm_in_fault_mode(vm));
- xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries);
- xe_pt_calc_rfence_interval(vma, &unbind_pt_update, entries,
- num_entries);
+ return dma_fence_get_stub();
+ }
- err = dma_resv_reserve_fences(xe_vm_resv(vm), 1);
- if (!err && !xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
- err = dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, 1);
- if (err)
- return ERR_PTR(err);
+#ifdef TEST_VM_OPS_ERROR
+ if (vops->inject_error &&
+ vm->xe->vm_inject_error_position == FORCE_OP_ERROR_RUN)
+ return ERR_PTR(-ENOSPC);
+#endif
- ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
- if (!ifence)
- return ERR_PTR(-ENOMEM);
+ if (pt_update_ops->needs_invalidation) {
+ ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
+ if (!ifence) {
+ err = -ENOMEM;
+ goto kill_vm_tile1;
+ }
+ }
rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
if (!rfence) {
- kfree(ifence);
- return ERR_PTR(-ENOMEM);
+ err = -ENOMEM;
+ goto free_ifence;
}
- /*
- * Even if we were already evicted and unbind to destroy, we need to
- * clear again here. The eviction may have updated pagetables at a
- * lower level, because it needs to be more conservative.
- */
- fence = xe_migrate_update_pgtables(tile->migrate,
- vm, NULL, q ? q :
- vm->q[tile->id],
- entries, num_entries,
- syncs, num_syncs,
- &unbind_pt_update.base);
- if (!IS_ERR(fence)) {
- int err;
-
- err = xe_range_fence_insert(&vm->rftree[tile->id], rfence,
- &xe_range_fence_kfree_ops,
- unbind_pt_update.base.start,
- unbind_pt_update.base.last, fence);
- if (err)
- dma_fence_wait(fence, false);
+ fence = xe_migrate_update_pgtables(tile->migrate, &update);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ goto free_rfence;
+ }
- /* TLB invalidation must be done before signaling unbind */
- err = invalidation_fence_init(tile->primary_gt, ifence, fence,
- xe_vma_start(vma),
- xe_vma_end(vma),
- xe_vma_vm(vma)->usm.asid);
- if (err) {
- dma_fence_put(fence);
- kfree(ifence);
- return ERR_PTR(err);
- }
- fence = &ifence->base.base;
+ /* Point of no return - VM killed if failure after this */
+ for (i = 0; i < pt_update_ops->current_op; ++i) {
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
- /* add shared fence now for pagetable delayed destroy */
- dma_resv_add_fence(xe_vm_resv(vm), fence,
- DMA_RESV_USAGE_BOOKKEEP);
+ xe_pt_commit(pt_op->vma, pt_op->entries,
+ pt_op->num_entries, &pt_update_ops->deferred);
+ pt_op->vma = NULL; /* skip in xe_pt_update_ops_abort */
+ }
- /* This fence will be installed by caller when doing eviction */
- if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
- dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
- DMA_RESV_USAGE_BOOKKEEP);
- xe_pt_commit_unbind(vma, entries, num_entries,
- unbind_pt_update.locked ? &deferred : NULL);
- vma->tile_present &= ~BIT(tile->id);
- } else {
- kfree(rfence);
- kfree(ifence);
+ if (xe_range_fence_insert(&vm->rftree[tile->id], rfence,
+ &xe_range_fence_kfree_ops,
+ pt_update_ops->start,
+ pt_update_ops->last, fence))
+ dma_fence_wait(fence, false);
+
+ /* tlb invalidation must be done before signaling rebind */
+ if (ifence) {
+ invalidation_fence_init(tile->primary_gt, ifence, fence,
+ pt_update_ops->start,
+ pt_update_ops->last, vm->usm.asid);
+ fence = &ifence->base.base;
}
- if (!vma->tile_present)
- list_del_init(&vma->combined_links.rebind);
+ dma_resv_add_fence(xe_vm_resv(vm), fence,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
- if (unbind_pt_update.locked) {
- xe_tile_assert(tile, xe_vma_is_userptr(vma));
+ list_for_each_entry(op, &vops->list, link)
+ op_commit(vops->vm, tile, pt_update_ops, op, fence);
- if (!vma->tile_present) {
- spin_lock(&vm->userptr.invalidated_lock);
- list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
- spin_unlock(&vm->userptr.invalidated_lock);
- }
+ if (pt_update_ops->needs_userptr_lock)
up_read(&vm->userptr.notifier_lock);
- xe_bo_put_commit(&deferred);
- }
return fence;
+
+free_rfence:
+ kfree(rfence);
+free_ifence:
+ kfree(ifence);
+kill_vm_tile1:
+ if (err != -EAGAIN && tile->id)
+ xe_vm_kill(vops->vm, false);
+
+ return ERR_PTR(err);
+}
+
+/**
+ * xe_pt_update_ops_fini() - Finish PT update operations
+ * @tile: Tile of PT update operations
+ * @vops: VMA operations
+ *
+ * Finish PT update operations by committing the deferred destruction of
+ * page-table memory.
+ */
+void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops)
+{
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[tile->id];
+ int i;
+
+ lockdep_assert_held(&vops->vm->lock);
+ xe_vm_assert_held(vops->vm);
+
+ for (i = 0; i < pt_update_ops->current_op; ++i) {
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
+
+ xe_pt_free_bind(pt_op->entries, pt_op->num_entries);
+ }
+ xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred);
+}
+
+/**
+ * xe_pt_update_ops_abort() - Abort PT update operations
+ * @tile: Tile of PT update operations
+ * @vops: VMA operations
+ *
+ * Abort PT update operations by unwinding internal PT state
+ */
+void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops)
+{
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[tile->id];
+ int i;
+
+ lockdep_assert_held(&vops->vm->lock);
+ xe_vm_assert_held(vops->vm);
+
+ for (i = pt_update_ops->num_ops - 1; i >= 0; --i) {
+ struct xe_vm_pgtable_update_op *pt_op =
+ &pt_update_ops->ops[i];
+
+ if (!pt_op->vma || i >= pt_update_ops->current_op)
+ continue;
+
+ if (pt_op->bind)
+ xe_pt_abort_bind(pt_op->vma, pt_op->entries,
+ pt_op->num_entries,
+ pt_op->rebind);
+ else
+ xe_pt_abort_unbind(pt_op->vma, pt_op->entries,
+ pt_op->num_entries);
+ }
+
+ xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred);
}
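
The prepare/run/fini/abort entry points above replace the old per-VMA __xe_pt_bind_vma() / __xe_pt_unbind_vma() helpers. A minimal per-tile sketch of the intended call order (mirroring ops_execute() in the xe_vm.c hunks further down; locking and multi-tile fence handling omitted):

        struct dma_fence *fence;
        int err;

        err = xe_pt_update_ops_prepare(tile, vops);     /* stage internal PT state */
        if (err)
                return ERR_PTR(err);

        fence = xe_pt_update_ops_run(tile, vops);       /* commit state, submit PT job */
        if (IS_ERR(fence)) {
                xe_pt_update_ops_abort(tile, vops);     /* unwind staged PT state */
                return fence;
        }

        xe_pt_update_ops_fini(tile, vops);              /* free entries, deferred frees */
        return fence;
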
diff --git a/drivers/gpu/drm/xe/xe_pt.h b/drivers/gpu/drm/xe/xe_pt.h
index 71a4fbfcff43..9ab386431cad 100644
--- a/drivers/gpu/drm/xe/xe_pt.h
+++ b/drivers/gpu/drm/xe/xe_pt.h
@@ -17,6 +17,7 @@ struct xe_sync_entry;
struct xe_tile;
struct xe_vm;
struct xe_vma;
+struct xe_vma_ops;
/* Largest huge pte is currently 1GiB. May become device dependent. */
#define MAX_HUGEPTE_LEVEL 2
@@ -34,14 +35,11 @@ void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred);
-struct dma_fence *
-__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
- struct xe_sync_entry *syncs, u32 num_syncs,
- bool rebind);
-
-struct dma_fence *
-__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
- struct xe_sync_entry *syncs, u32 num_syncs);
+int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops);
+struct dma_fence *xe_pt_update_ops_run(struct xe_tile *tile,
+ struct xe_vma_ops *vops);
+void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops);
+void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops);
bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma);
diff --git a/drivers/gpu/drm/xe/xe_pt_types.h b/drivers/gpu/drm/xe/xe_pt_types.h
index cee70cb0f014..384cc04de719 100644
--- a/drivers/gpu/drm/xe/xe_pt_types.h
+++ b/drivers/gpu/drm/xe/xe_pt_types.h
@@ -74,4 +74,52 @@ struct xe_vm_pgtable_update {
u32 flags;
};
+/** struct xe_vm_pgtable_update_op - Page table update operation */
+struct xe_vm_pgtable_update_op {
+ /** @entries: entries to update for this operation */
+ struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
+ /** @vma: VMA for operation, operation not valid if NULL */
+ struct xe_vma *vma;
+ /** @num_entries: number of entries for this update operation */
+ u32 num_entries;
+ /** @bind: is a bind */
+ bool bind;
+ /** @rebind: is a rebind */
+ bool rebind;
+};
+
+/** struct xe_vm_pgtable_update_ops: page table update operations */
+struct xe_vm_pgtable_update_ops {
+ /** @ops: operations */
+ struct xe_vm_pgtable_update_op *ops;
+ /** @deferred: deferred list to destroy PT entries */
+ struct llist_head deferred;
+ /** @q: exec queue for PT operations */
+ struct xe_exec_queue *q;
+ /** @start: start address of ops */
+ u64 start;
+ /** @last: last address of ops */
+ u64 last;
+ /** @num_ops: number of operations */
+ u32 num_ops;
+ /** @current_op: current operation index */
+ u32 current_op;
+ /** @needs_userptr_lock: Needs userptr lock */
+ bool needs_userptr_lock;
+ /** @needs_invalidation: Needs invalidation */
+ bool needs_invalidation;
+ /**
+ * @wait_vm_bookkeep: PT operations need to wait until VM is idle
+ * (bookkeep dma-resv slots are idle) and stage all future VM activity
+ * behind these operations (install PT operations into VM kernel
+ * dma-resv slot).
+ */
+ bool wait_vm_bookkeep;
+ /**
+ * @wait_vm_kernel: PT operations need to wait until VM kernel dma-resv
+ * slots are idle.
+ */
+ bool wait_vm_kernel;
+};
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 4e01df6b1b7a..73ef6e4c2dc9 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -518,7 +518,9 @@ static int query_gt_topology(struct xe_device *xe,
if (err)
return err;
- topo.type = DRM_XE_TOPO_EU_PER_DSS;
+ topo.type = gt->fuse_topo.eu_type == XE_GT_EU_TYPE_SIMD16 ?
+ DRM_XE_TOPO_SIMD16_EU_PER_DSS :
+ DRM_XE_TOPO_EU_PER_DSS;
err = copy_mask(&query_ptr, &topo,
gt->fuse_topo.eu_mask_per_dss,
sizeof(gt->fuse_topo.eu_mask_per_dss));
diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
index 02e28274282f..e78ba324dd18 100644
--- a/drivers/gpu/drm/xe/xe_rtp.c
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -217,21 +217,19 @@ void xe_rtp_process_ctx_enable_active_tracking(struct xe_rtp_process_ctx *ctx,
ctx->active_entries = active_entries;
ctx->n_entries = n_entries;
}
+EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_ctx_enable_active_tracking);
static void rtp_mark_active(struct xe_device *xe,
struct xe_rtp_process_ctx *ctx,
- unsigned int first, unsigned int last)
+ unsigned int idx)
{
if (!ctx->active_entries)
return;
- if (drm_WARN_ON(&xe->drm, last > ctx->n_entries))
+ if (drm_WARN_ON(&xe->drm, idx >= ctx->n_entries))
return;
- if (first == last)
- bitmap_set(ctx->active_entries, first, 1);
- else
- bitmap_set(ctx->active_entries, first, last - first + 2);
+ bitmap_set(ctx->active_entries, idx, 1);
}
/**
@@ -276,8 +274,7 @@ void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
}
if (match)
- rtp_mark_active(xe, ctx, entry - entries,
- entry - entries);
+ rtp_mark_active(xe, ctx, entry - entries);
}
}
EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_to_sr);
@@ -288,44 +285,29 @@ EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_to_sr);
* @entries: Table with RTP definitions
*
* Walk the table pointed by @entries (with an empty sentinel), executing the
- * rules. A few differences from xe_rtp_process_to_sr():
- *
- * 1. There is no action associated with each entry since this uses
- * struct xe_rtp_entry. Its main use is for marking active workarounds via
- * xe_rtp_process_ctx_enable_active_tracking().
- * 2. There is support for OR operations by having entries with no name.
+ * rules. One difference from xe_rtp_process_to_sr(): there is no action
+ * associated with each entry since this uses struct xe_rtp_entry. Its main use
+ * is for marking active workarounds via
+ * xe_rtp_process_ctx_enable_active_tracking().
*/
void xe_rtp_process(struct xe_rtp_process_ctx *ctx,
const struct xe_rtp_entry *entries)
{
- const struct xe_rtp_entry *entry, *first_entry;
+ const struct xe_rtp_entry *entry;
struct xe_hw_engine *hwe;
struct xe_gt *gt;
struct xe_device *xe;
rtp_get_context(ctx, &hwe, &gt, &xe);
- first_entry = entries;
- if (drm_WARN_ON(&xe->drm, !first_entry->name))
- return;
-
for (entry = entries; entry && entry->rules; entry++) {
- if (entry->name)
- first_entry = entry;
-
if (!rule_matches(xe, gt, hwe, entry->rules, entry->n_rules))
continue;
- /* Fast-forward entry, eliminating the OR'ed entries */
- for (entry++; entry && entry->rules; entry++)
- if (entry->name)
- break;
- entry--;
-
- rtp_mark_active(xe, ctx, first_entry - entries,
- entry - entries);
+ rtp_mark_active(xe, ctx, entry - entries);
}
}
+EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process);
bool xe_rtp_match_even_instance(const struct xe_gt *gt,
const struct xe_hw_engine *hwe)
diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h
index ad446731192c..827d932b6908 100644
--- a/drivers/gpu/drm/xe/xe_rtp.h
+++ b/drivers/gpu/drm/xe/xe_rtp.h
@@ -374,7 +374,7 @@ struct xe_reg_sr;
* XE_RTP_RULES - Helper to set multiple rules to a struct xe_rtp_entry_sr entry
* @...: Rules
*
- * At least one rule is needed and up to 6 are supported. Multiple rules are
+ * At least one rule is needed and up to 12 are supported. Multiple rules are
* AND'ed together, i.e. all the rules must evaluate to true for the entry to
* be processed. See XE_RTP_MATCH_* for the possible match rules. Example:
*
@@ -399,7 +399,7 @@ struct xe_reg_sr;
* XE_RTP_ACTIONS - Helper to set multiple actions to a struct xe_rtp_entry_sr
* @...: Actions to be taken
*
- * At least one action is needed and up to 6 are supported. See XE_RTP_ACTION_*
+ * At least one action is needed and up to 12 are supported. See XE_RTP_ACTION_*
* for the possible actions. Example:
*
* .. code-block:: c
diff --git a/drivers/gpu/drm/xe/xe_rtp_helpers.h b/drivers/gpu/drm/xe/xe_rtp_helpers.h
index c59e40fd7fff..a33b0ae98bbc 100644
--- a/drivers/gpu/drm/xe/xe_rtp_helpers.h
+++ b/drivers/gpu/drm/xe/xe_rtp_helpers.h
@@ -60,6 +60,12 @@
#define XE_RTP_PASTE_4(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_3(prefix_, sep_, _XE_TUPLE_TAIL args_)
#define XE_RTP_PASTE_5(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_4(prefix_, sep_, _XE_TUPLE_TAIL args_)
#define XE_RTP_PASTE_6(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_5(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_7(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_6(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_8(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_7(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_9(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_8(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_10(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_9(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_11(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_10(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_12(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, FIRST_ARG args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_11(prefix_, sep_, _XE_TUPLE_TAIL args_)
/*
* XE_RTP_DROP_CAST - Drop cast to convert a compound statement to a initializer
diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index 8941522b7705..f3060979e63f 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -84,6 +84,13 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
unsigned int size)
{
+ /*
+ * BB too large, return -ENOBUFS to indicate the user should split the
+ * array of binds into smaller chunks.
+ */
+ if (size > sa_manager->base.size)
+ return ERR_PTR(-ENOBUFS);
+
return drm_suballoc_new(&sa_manager->base, size, GFP_KERNEL, true, 0);
}
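
A short, hypothetical caller-side sketch of what the new -ENOBUFS return is asking for (submit_binds() is illustrative only, not an API from this patch): when one batch is too large, split the array of binds and resubmit in smaller chunks.

        err = submit_binds(vm, binds, num_binds);
        if (err == -ENOBUFS && num_binds > 1) {
                u32 half = num_binds / 2;

                /* Too large for one suballocation: split and retry. */
                err = submit_binds(vm, binds, half);
                if (!err)
                        err = submit_binds(vm, binds + half, num_binds - half);
        }
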
diff --git a/drivers/gpu/drm/xe/xe_sriov.c b/drivers/gpu/drm/xe/xe_sriov.c
index a274a5fb1401..5a1d65e4f19f 100644
--- a/drivers/gpu/drm/xe/xe_sriov.c
+++ b/drivers/gpu/drm/xe/xe_sriov.c
@@ -5,7 +5,7 @@
#include <drm/drm_managed.h>
-#include "regs/xe_sriov_regs.h"
+#include "regs/xe_regs.h"
#include "xe_assert.h"
#include "xe_device.h"
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index 2883d9aca404..533246f42256 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -53,14 +53,18 @@ static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
u64 value)
{
struct xe_user_fence *ufence;
+ u64 __user *ptr = u64_to_user_ptr(addr);
+
+ if (!access_ok(ptr, sizeof(ptr)))
+ return ERR_PTR(-EFAULT);
ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
if (!ufence)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ufence->xe = xe;
kref_init(&ufence->refcount);
- ufence->addr = u64_to_user_ptr(addr);
+ ufence->addr = ptr;
ufence->value = value;
ufence->mm = current->mm;
mmgrab(ufence->mm);
@@ -183,8 +187,8 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
} else {
sync->ufence = user_fence_create(xe, sync_in.addr,
sync_in.timeline_value);
- if (XE_IOCTL_DBG(xe, !sync->ufence))
- return -ENOMEM;
+ if (XE_IOCTL_DBG(xe, IS_ERR(sync->ufence)))
+ return PTR_ERR(sync->ufence);
}
break;
@@ -200,14 +204,6 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
return 0;
}
-int xe_sync_entry_wait(struct xe_sync_entry *sync)
-{
- if (sync->fence)
- dma_fence_wait(sync->fence, true);
-
- return 0;
-}
-
int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job)
{
int err;
diff --git a/drivers/gpu/drm/xe/xe_sync.h b/drivers/gpu/drm/xe/xe_sync.h
index 006dbf780793..256ffc1e54dc 100644
--- a/drivers/gpu/drm/xe/xe_sync.h
+++ b/drivers/gpu/drm/xe/xe_sync.h
@@ -22,7 +22,6 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
struct xe_sync_entry *sync,
struct drm_xe_sync __user *sync_user,
unsigned int flags);
-int xe_sync_entry_wait(struct xe_sync_entry *sync);
int xe_sync_entry_add_deps(struct xe_sync_entry *sync,
struct xe_sched_job *job);
void xe_sync_entry_signal(struct xe_sync_entry *sync,
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index baba14fb1e32..1abdb30cb7ad 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -369,6 +369,58 @@ TRACE_EVENT(xe_reg_rw,
(u32)(__entry->val >> 32))
);
+DECLARE_EVENT_CLASS(xe_pm_runtime,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller),
+
+ TP_STRUCT__entry(
+ __string(dev, __dev_name_xe(xe))
+ __field(void *, caller)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev);
+ __entry->caller = caller;
+ ),
+
+ TP_printk("dev=%s caller_function=%pS", __get_str(dev), __entry->caller)
+);
+
+DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
+);
+
+DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_put,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
+);
+
+DEFINE_EVENT(xe_pm_runtime, xe_pm_resume,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
+);
+
+DEFINE_EVENT(xe_pm_runtime, xe_pm_suspend,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
+);
+
+DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_resume,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
+);
+
+DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_suspend,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
+);
+
+DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get_ioctl,
+ TP_PROTO(struct xe_device *xe, void *caller),
+ TP_ARGS(xe, caller)
+);
+
#endif
/* This part must be outside protection */
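
The xe_pm_runtime event class records the caller as a code pointer printed with %pS; the call sites are not part of this diff, but the expected emission pattern is likely along these lines (a sketch, not taken from the patch):

        trace_xe_pm_runtime_get(xe, __builtin_return_address(0));
        /* ... runtime-PM work ... */
        trace_xe_pm_runtime_put(xe, __builtin_return_address(0));
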
diff --git a/drivers/gpu/drm/xe/xe_trace_bo.h b/drivers/gpu/drm/xe/xe_trace_bo.h
index f39f09ed3495..9b1a1d4304ae 100644
--- a/drivers/gpu/drm/xe/xe_trace_bo.h
+++ b/drivers/gpu/drm/xe/xe_trace_bo.h
@@ -117,11 +117,6 @@ DEFINE_EVENT(xe_vma, xe_vma_acc,
TP_ARGS(vma)
);
-DEFINE_EVENT(xe_vma, xe_vma_fail,
- TP_PROTO(struct xe_vma *vma),
- TP_ARGS(vma)
-);
-
DEFINE_EVENT(xe_vma, xe_vma_bind,
TP_PROTO(struct xe_vma *vma),
TP_ARGS(vma)
@@ -237,6 +232,11 @@ DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_exit,
TP_ARGS(vm)
);
+DEFINE_EVENT(xe_vm, xe_vm_ops_fail,
+ TP_PROTO(struct xe_vm *vm),
+ TP_ARGS(vm)
+);
+
#endif
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
index d4e6fa918942..77d4eec0118d 100644
--- a/drivers/gpu/drm/xe/xe_tuning.c
+++ b/drivers/gpu/drm/xe/xe_tuning.c
@@ -93,6 +93,14 @@ static const struct xe_rtp_entry_sr lrc_tunings[] = {
REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)))
},
+ /* Xe2_HPG */
+
+ { XE_RTP_NAME("Tuning: vs hit max value"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(FIELD_SET(FF_MODE, VS_HIT_MAX_VALUE_MASK,
+ REG_FIELD_PREP(VS_HIT_MAX_VALUE_MASK, 0x3f)))
+ },
+
{}
};
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c
index 5f23ecd98376..5b70d23724c4 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.c
+++ b/drivers/gpu/drm/xe/xe_uc_fw.c
@@ -116,6 +116,8 @@ struct fw_blobs_by_type {
fw_def(TIGERLAKE, major_ver(i915, guc, tgl, 70, 19, 2))
#define XE_HUC_FIRMWARE_DEFS(fw_def, mmp_ver, no_ver) \
+ fw_def(BATTLEMAGE, no_ver(xe, huc, bmg)) \
+ fw_def(LUNARLAKE, no_ver(xe, huc, lnl)) \
fw_def(METEORLAKE, no_ver(i915, huc_gsc, mtl)) \
fw_def(DG1, no_ver(i915, huc, dg1)) \
fw_def(ALDERLAKE_P, no_ver(i915, huc, tgl)) \
@@ -125,6 +127,7 @@ struct fw_blobs_by_type {
/* for the GSC FW we match the compatibility version and not the release one */
#define XE_GSC_FIRMWARE_DEFS(fw_def, major_ver) \
+ fw_def(LUNARLAKE, major_ver(xe, gsc, lnl, 1, 0, 0)) \
fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 1, 0, 0))
#define MAKE_FW_PATH(dir__, uc__, shortname__, version__) \
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 5b166fa03684..b715883f40d8 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -133,8 +133,10 @@ static int wait_for_existing_preempt_fences(struct xe_vm *vm)
if (q->lr.pfence) {
long timeout = dma_fence_wait(q->lr.pfence, false);
- if (timeout < 0)
+ /* Only -ETIME on the fence indicates the VM needs to be killed */
+ if (timeout < 0 || q->lr.pfence->error == -ETIME)
return -ETIME;
+
dma_fence_put(q->lr.pfence);
q->lr.pfence = NULL;
}
@@ -311,7 +313,15 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
#define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
-static void xe_vm_kill(struct xe_vm *vm, bool unlocked)
+/**
+ * xe_vm_kill() - VM Kill
+ * @vm: The VM.
+ * @unlocked: Flag indicating the VM's dma-resv is not held
+ *
+ * Kill the VM by setting the banned flag, indicating the VM is no longer
+ * available for use. If in preempt-fence mode, also kill all exec queues
+ * attached to the VM.
+ */
+void xe_vm_kill(struct xe_vm *vm, bool unlocked)
{
struct xe_exec_queue *q;
@@ -708,6 +718,42 @@ int xe_vm_userptr_check_repin(struct xe_vm *vm)
list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
}
+static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
+{
+ int i;
+
+ for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i) {
+ if (!vops->pt_update_ops[i].num_ops)
+ continue;
+
+ vops->pt_update_ops[i].ops =
+ kmalloc_array(vops->pt_update_ops[i].num_ops,
+ sizeof(*vops->pt_update_ops[i].ops),
+ GFP_KERNEL);
+ if (!vops->pt_update_ops[i].ops)
+ return array_of_binds ? -ENOBUFS : -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void xe_vma_ops_fini(struct xe_vma_ops *vops)
+{
+ int i;
+
+ for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
+ kfree(vops->pt_update_ops[i].ops);
+}
+
+static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask)
+{
+ int i;
+
+ for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
+ if (BIT(i) & tile_mask)
+ ++vops->pt_update_ops[i].num_ops;
+}
+
static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
u8 tile_mask)
{
@@ -735,6 +781,7 @@ static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
xe_vm_populate_rebind(op, vma, tile_mask);
list_add_tail(&op->link, &vops->list);
+ xe_vma_ops_incr_pt_update_ops(vops, tile_mask);
return 0;
}
@@ -751,7 +798,7 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
struct xe_vma *vma, *next;
struct xe_vma_ops vops;
struct xe_vma_op *op, *next_op;
- int err;
+ int err, i;
lockdep_assert_held(&vm->lock);
if ((xe_vm_in_lr_mode(vm) && !rebind_worker) ||
@@ -759,6 +806,8 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
return 0;
xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+ for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
+ vops.pt_update_ops[i].wait_vm_bookkeep = true;
xe_vm_assert_held(vm);
list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
@@ -775,6 +824,10 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
goto free_ops;
}
+ err = xe_vma_ops_alloc(&vops, false);
+ if (err)
+ goto free_ops;
+
fence = ops_execute(vm, &vops);
if (IS_ERR(fence)) {
err = PTR_ERR(fence);
@@ -789,6 +842,7 @@ free_ops:
list_del(&op->link);
kfree(op);
}
+ xe_vma_ops_fini(&vops);
return err;
}
@@ -798,6 +852,8 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma
struct dma_fence *fence = NULL;
struct xe_vma_ops vops;
struct xe_vma_op *op, *next_op;
+ struct xe_tile *tile;
+ u8 id;
int err;
lockdep_assert_held(&vm->lock);
@@ -805,17 +861,30 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma
xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+ for_each_tile(tile, vm->xe, id) {
+ vops.pt_update_ops[id].wait_vm_bookkeep = true;
+ vops.pt_update_ops[tile->id].q =
+ xe_tile_migrate_exec_queue(tile);
+ }
err = xe_vm_ops_add_rebind(&vops, vma, tile_mask);
if (err)
return ERR_PTR(err);
+ err = xe_vma_ops_alloc(&vops, false);
+ if (err) {
+ fence = ERR_PTR(err);
+ goto free_ops;
+ }
+
fence = ops_execute(vm, &vops);
+free_ops:
list_for_each_entry_safe(op, next_op, &vops.list, link) {
list_del(&op->link);
kfree(op);
}
+ xe_vma_ops_fini(&vops);
return fence;
}
@@ -1333,6 +1402,8 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
init_rwsem(&vm->userptr.notifier_lock);
spin_lock_init(&vm->userptr.invalidated_lock);
+ ttm_lru_bulk_move_init(&vm->lru_bulk_move);
+
INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
INIT_LIST_HEAD(&vm->preempt.exec_queues);
@@ -1458,6 +1529,7 @@ err_no_resv:
mutex_destroy(&vm->snap_mutex);
for_each_tile(tile, xe, id)
xe_range_fence_tree_fini(&vm->rftree[id]);
+ ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move);
kfree(vm);
if (flags & XE_VM_FLAG_LR_MODE)
xe_pm_runtime_put(xe);
@@ -1601,6 +1673,11 @@ static void vm_destroy_work_func(struct work_struct *w)
XE_WARN_ON(vm->pt_root[id]);
trace_xe_vm_free(vm);
+ ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move);
+
+ if (vm->xef)
+ xe_file_put(vm->xef);
+
kfree(vm);
}
@@ -1637,147 +1714,6 @@ to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
return q ? q : vm->q[0];
}
-static struct dma_fence *
-xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
- struct xe_sync_entry *syncs, u32 num_syncs,
- bool first_op, bool last_op)
-{
- struct xe_vm *vm = xe_vma_vm(vma);
- struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
- struct xe_tile *tile;
- struct dma_fence *fence = NULL;
- struct dma_fence **fences = NULL;
- struct dma_fence_array *cf = NULL;
- int cur_fence = 0;
- int number_tiles = hweight8(vma->tile_present);
- int err;
- u8 id;
-
- trace_xe_vma_unbind(vma);
-
- if (number_tiles > 1) {
- fences = kmalloc_array(number_tiles, sizeof(*fences),
- GFP_KERNEL);
- if (!fences)
- return ERR_PTR(-ENOMEM);
- }
-
- for_each_tile(tile, vm->xe, id) {
- if (!(vma->tile_present & BIT(id)))
- goto next;
-
- fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id],
- first_op ? syncs : NULL,
- first_op ? num_syncs : 0);
- if (IS_ERR(fence)) {
- err = PTR_ERR(fence);
- goto err_fences;
- }
-
- if (fences)
- fences[cur_fence++] = fence;
-
-next:
- if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
- q = list_next_entry(q, multi_gt_list);
- }
-
- if (fences) {
- cf = dma_fence_array_create(number_tiles, fences,
- vm->composite_fence_ctx,
- vm->composite_fence_seqno++,
- false);
- if (!cf) {
- --vm->composite_fence_seqno;
- err = -ENOMEM;
- goto err_fences;
- }
- }
-
- fence = cf ? &cf->base : !fence ?
- xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
-
- return fence;
-
-err_fences:
- if (fences) {
- while (cur_fence)
- dma_fence_put(fences[--cur_fence]);
- kfree(fences);
- }
-
- return ERR_PTR(err);
-}
-
-static struct dma_fence *
-xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
- struct xe_sync_entry *syncs, u32 num_syncs,
- u8 tile_mask, bool first_op, bool last_op)
-{
- struct xe_tile *tile;
- struct dma_fence *fence;
- struct dma_fence **fences = NULL;
- struct dma_fence_array *cf = NULL;
- struct xe_vm *vm = xe_vma_vm(vma);
- int cur_fence = 0;
- int number_tiles = hweight8(tile_mask);
- int err;
- u8 id;
-
- trace_xe_vma_bind(vma);
-
- if (number_tiles > 1) {
- fences = kmalloc_array(number_tiles, sizeof(*fences),
- GFP_KERNEL);
- if (!fences)
- return ERR_PTR(-ENOMEM);
- }
-
- for_each_tile(tile, vm->xe, id) {
- if (!(tile_mask & BIT(id)))
- goto next;
-
- fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
- first_op ? syncs : NULL,
- first_op ? num_syncs : 0,
- vma->tile_present & BIT(id));
- if (IS_ERR(fence)) {
- err = PTR_ERR(fence);
- goto err_fences;
- }
-
- if (fences)
- fences[cur_fence++] = fence;
-
-next:
- if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
- q = list_next_entry(q, multi_gt_list);
- }
-
- if (fences) {
- cf = dma_fence_array_create(number_tiles, fences,
- vm->composite_fence_ctx,
- vm->composite_fence_seqno++,
- false);
- if (!cf) {
- --vm->composite_fence_seqno;
- err = -ENOMEM;
- goto err_fences;
- }
- }
-
- return cf ? &cf->base : fence;
-
-err_fences:
- if (fences) {
- while (cur_fence)
- dma_fence_put(fences[--cur_fence]);
- kfree(fences);
- }
-
- return ERR_PTR(err);
-}
-
static struct xe_user_fence *
find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
{
@@ -1793,48 +1729,6 @@ find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
return NULL;
}
-static struct dma_fence *
-xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
- struct xe_bo *bo, struct xe_sync_entry *syncs, u32 num_syncs,
- u8 tile_mask, bool immediate, bool first_op, bool last_op)
-{
- struct dma_fence *fence;
- struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
-
- xe_vm_assert_held(vm);
- xe_bo_assert_held(bo);
-
- if (immediate) {
- fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, tile_mask,
- first_op, last_op);
- if (IS_ERR(fence))
- return fence;
- } else {
- xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
-
- fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
- }
-
- return fence;
-}
-
-static struct dma_fence *
-xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_exec_queue *q, struct xe_sync_entry *syncs,
- u32 num_syncs, bool first_op, bool last_op)
-{
- struct dma_fence *fence;
-
- xe_vm_assert_held(vm);
- xe_bo_assert_held(xe_vma_bo(vma));
-
- fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
- if (IS_ERR(fence))
- return fence;
-
- return fence;
-}
-
#define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
DRM_XE_VM_CREATE_FLAG_LR_MODE | \
DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
@@ -1916,7 +1810,7 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
}
args->vm_id = id;
- vm->xef = xef;
+ vm->xef = xe_file_get(xef);
/* Record BO memory for VM pagetable created against client */
for_each_tile(tile, xe, id)
@@ -1975,21 +1869,6 @@ static const u32 region_to_mem_type[] = {
XE_PL_VRAM1,
};
-static struct dma_fence *
-xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_exec_queue *q, struct xe_sync_entry *syncs,
- u32 num_syncs, bool first_op, bool last_op)
-{
- struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
-
- if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
- return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
- vma->tile_mask, true, first_op, last_op);
- } else {
- return xe_exec_queue_last_fence_get(wait_exec_queue, vm);
- }
-}
-
static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
bool post_commit)
{
@@ -2277,14 +2156,10 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
return err;
}
-
-static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
- struct drm_gpuva_ops *ops,
- struct xe_sync_entry *syncs, u32 num_syncs,
- struct xe_vma_ops *vops, bool last)
+static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
+ struct xe_vma_ops *vops)
{
struct xe_device *xe = vm->xe;
- struct xe_vma_op *last_op = NULL;
struct drm_gpuva_op *__op;
struct xe_tile *tile;
u8 id, tile_mask = 0;
@@ -2298,19 +2173,10 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
drm_gpuva_for_each_op(__op, ops) {
struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
struct xe_vma *vma;
- bool first = list_empty(&vops->list);
unsigned int flags = 0;
INIT_LIST_HEAD(&op->link);
list_add_tail(&op->link, &vops->list);
-
- if (first) {
- op->flags |= XE_VMA_OP_FIRST;
- op->num_syncs = num_syncs;
- op->syncs = syncs;
- }
-
- op->q = q;
op->tile_mask = tile_mask;
switch (op->base.op) {
@@ -2329,6 +2195,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
return PTR_ERR(vma);
op->map.vma = vma;
+ if (op->map.immediate || !xe_vm_in_fault_mode(vm))
+ xe_vma_ops_incr_pt_update_ops(vops,
+ op->tile_mask);
break;
}
case DRM_GPUVA_OP_REMAP:
@@ -2373,6 +2242,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
vm_dbg(&xe->drm, "REMAP:SKIP_PREV: addr=0x%016llx, range=0x%016llx",
(ULL)op->remap.start,
(ULL)op->remap.range);
+ } else {
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
}
}
@@ -2409,203 +2280,30 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
vm_dbg(&xe->drm, "REMAP:SKIP_NEXT: addr=0x%016llx, range=0x%016llx",
(ULL)op->remap.start,
(ULL)op->remap.range);
+ } else {
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
}
}
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
break;
}
case DRM_GPUVA_OP_UNMAP:
case DRM_GPUVA_OP_PREFETCH:
- /* Nothing to do */
+ /* FIXME: Need to skip some prefetch ops */
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
break;
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}
- last_op = op;
-
err = xe_vma_op_commit(vm, op);
if (err)
return err;
}
- /* FIXME: Unhandled corner case */
- XE_WARN_ON(!last_op && last && !list_empty(&vops->list));
-
- if (!last_op)
- return 0;
-
- if (last) {
- last_op->flags |= XE_VMA_OP_LAST;
- last_op->num_syncs = num_syncs;
- last_op->syncs = syncs;
- }
-
return 0;
}
-static struct dma_fence *op_execute(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_vma_op *op)
-{
- struct dma_fence *fence = NULL;
-
- lockdep_assert_held(&vm->lock);
-
- xe_vm_assert_held(vm);
- xe_bo_assert_held(xe_vma_bo(vma));
-
- switch (op->base.op) {
- case DRM_GPUVA_OP_MAP:
- fence = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
- op->syncs, op->num_syncs,
- op->tile_mask,
- op->map.immediate || !xe_vm_in_fault_mode(vm),
- op->flags & XE_VMA_OP_FIRST,
- op->flags & XE_VMA_OP_LAST);
- break;
- case DRM_GPUVA_OP_REMAP:
- {
- bool prev = !!op->remap.prev;
- bool next = !!op->remap.next;
-
- if (!op->remap.unmap_done) {
- if (prev || next)
- vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
- fence = xe_vm_unbind(vm, vma, op->q, op->syncs,
- op->num_syncs,
- op->flags & XE_VMA_OP_FIRST,
- op->flags & XE_VMA_OP_LAST &&
- !prev && !next);
- if (IS_ERR(fence))
- break;
- op->remap.unmap_done = true;
- }
-
- if (prev) {
- op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
- dma_fence_put(fence);
- fence = xe_vm_bind(vm, op->remap.prev, op->q,
- xe_vma_bo(op->remap.prev), op->syncs,
- op->num_syncs,
- op->remap.prev->tile_mask, true,
- false,
- op->flags & XE_VMA_OP_LAST && !next);
- op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
- if (IS_ERR(fence))
- break;
- op->remap.prev = NULL;
- }
-
- if (next) {
- op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
- dma_fence_put(fence);
- fence = xe_vm_bind(vm, op->remap.next, op->q,
- xe_vma_bo(op->remap.next),
- op->syncs, op->num_syncs,
- op->remap.next->tile_mask, true,
- false, op->flags & XE_VMA_OP_LAST);
- op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
- if (IS_ERR(fence))
- break;
- op->remap.next = NULL;
- }
-
- break;
- }
- case DRM_GPUVA_OP_UNMAP:
- fence = xe_vm_unbind(vm, vma, op->q, op->syncs,
- op->num_syncs, op->flags & XE_VMA_OP_FIRST,
- op->flags & XE_VMA_OP_LAST);
- break;
- case DRM_GPUVA_OP_PREFETCH:
- fence = xe_vm_prefetch(vm, vma, op->q, op->syncs, op->num_syncs,
- op->flags & XE_VMA_OP_FIRST,
- op->flags & XE_VMA_OP_LAST);
- break;
- default:
- drm_warn(&vm->xe->drm, "NOT POSSIBLE");
- }
-
- if (IS_ERR(fence))
- trace_xe_vma_fail(vma);
-
- return fence;
-}
-
-static struct dma_fence *
-__xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_vma_op *op)
-{
- struct dma_fence *fence;
- int err;
-
-retry_userptr:
- fence = op_execute(vm, vma, op);
- if (IS_ERR(fence) && PTR_ERR(fence) == -EAGAIN) {
- lockdep_assert_held_write(&vm->lock);
-
- if (op->base.op == DRM_GPUVA_OP_REMAP) {
- if (!op->remap.unmap_done)
- vma = gpuva_to_vma(op->base.remap.unmap->va);
- else if (op->remap.prev)
- vma = op->remap.prev;
- else
- vma = op->remap.next;
- }
-
- if (xe_vma_is_userptr(vma)) {
- err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
- if (!err)
- goto retry_userptr;
-
- fence = ERR_PTR(err);
- trace_xe_vma_fail(vma);
- }
- }
-
- return fence;
-}
-
-static struct dma_fence *
-xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
-{
- struct dma_fence *fence = ERR_PTR(-ENOMEM);
-
- lockdep_assert_held(&vm->lock);
-
- switch (op->base.op) {
- case DRM_GPUVA_OP_MAP:
- fence = __xe_vma_op_execute(vm, op->map.vma, op);
- break;
- case DRM_GPUVA_OP_REMAP:
- {
- struct xe_vma *vma;
-
- if (!op->remap.unmap_done)
- vma = gpuva_to_vma(op->base.remap.unmap->va);
- else if (op->remap.prev)
- vma = op->remap.prev;
- else
- vma = op->remap.next;
-
- fence = __xe_vma_op_execute(vm, vma, op);
- break;
- }
- case DRM_GPUVA_OP_UNMAP:
- fence = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
- op);
- break;
- case DRM_GPUVA_OP_PREFETCH:
- fence = __xe_vma_op_execute(vm,
- gpuva_to_vma(op->base.prefetch.va),
- op);
- break;
- default:
- drm_warn(&vm->xe->drm, "NOT POSSIBLE");
- }
-
- return fence;
-}
-
static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
bool post_commit, bool prev_post_commit,
bool next_post_commit)
@@ -2788,26 +2486,157 @@ static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
return err;
}
+#ifdef TEST_VM_OPS_ERROR
+ if (vops->inject_error &&
+ vm->xe->vm_inject_error_position == FORCE_OP_ERROR_LOCK)
+ return -ENOSPC;
+#endif
+
return 0;
}
+static void op_trace(struct xe_vma_op *op)
+{
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ trace_xe_vma_bind(op->map.vma);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ trace_xe_vma_unbind(gpuva_to_vma(op->base.remap.unmap->va));
+ if (op->remap.prev)
+ trace_xe_vma_bind(op->remap.prev);
+ if (op->remap.next)
+ trace_xe_vma_bind(op->remap.next);
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ trace_xe_vma_unbind(gpuva_to_vma(op->base.unmap.va));
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ trace_xe_vma_bind(gpuva_to_vma(op->base.prefetch.va));
+ break;
+ default:
+ XE_WARN_ON("NOT POSSIBLE");
+ }
+}
+
+static void trace_xe_vm_ops_execute(struct xe_vma_ops *vops)
+{
+ struct xe_vma_op *op;
+
+ list_for_each_entry(op, &vops->list, link)
+ op_trace(op);
+}
+
+static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops)
+{
+ struct xe_exec_queue *q = vops->q;
+ struct xe_tile *tile;
+ int number_tiles = 0;
+ u8 id;
+
+ for_each_tile(tile, vm->xe, id) {
+ if (vops->pt_update_ops[id].num_ops)
+ ++number_tiles;
+
+ if (vops->pt_update_ops[id].q)
+ continue;
+
+ if (q) {
+ vops->pt_update_ops[id].q = q;
+ if (vm->pt_root[id] && !list_empty(&q->multi_gt_list))
+ q = list_next_entry(q, multi_gt_list);
+ } else {
+ vops->pt_update_ops[id].q = vm->q[id];
+ }
+ }
+
+ return number_tiles;
+}
+
static struct dma_fence *ops_execute(struct xe_vm *vm,
struct xe_vma_ops *vops)
{
- struct xe_vma_op *op, *next;
+ struct xe_tile *tile;
struct dma_fence *fence = NULL;
+ struct dma_fence **fences = NULL;
+ struct dma_fence_array *cf = NULL;
+ int number_tiles = 0, current_fence = 0, err;
+ u8 id;
- list_for_each_entry_safe(op, next, &vops->list, link) {
- dma_fence_put(fence);
- fence = xe_vma_op_execute(vm, op);
- if (IS_ERR(fence)) {
- drm_warn(&vm->xe->drm, "VM op(%d) failed with %ld",
- op->base.op, PTR_ERR(fence));
- fence = ERR_PTR(-ENOSPC);
- break;
+ number_tiles = vm_ops_setup_tile_args(vm, vops);
+ if (number_tiles == 0)
+ return ERR_PTR(-ENODATA);
+
+ if (number_tiles > 1) {
+ fences = kmalloc_array(number_tiles, sizeof(*fences),
+ GFP_KERNEL);
+ if (!fences) {
+ fence = ERR_PTR(-ENOMEM);
+ goto err_trace;
+ }
+ }
+
+ for_each_tile(tile, vm->xe, id) {
+ if (!vops->pt_update_ops[id].num_ops)
+ continue;
+
+ err = xe_pt_update_ops_prepare(tile, vops);
+ if (err) {
+ fence = ERR_PTR(err);
+ goto err_out;
}
}
+ trace_xe_vm_ops_execute(vops);
+
+ for_each_tile(tile, vm->xe, id) {
+ if (!vops->pt_update_ops[id].num_ops)
+ continue;
+
+ fence = xe_pt_update_ops_run(tile, vops);
+ if (IS_ERR(fence))
+ goto err_out;
+
+ if (fences)
+ fences[current_fence++] = fence;
+ }
+
+ if (fences) {
+ cf = dma_fence_array_create(number_tiles, fences,
+ vm->composite_fence_ctx,
+ vm->composite_fence_seqno++,
+ false);
+ if (!cf) {
+ --vm->composite_fence_seqno;
+ fence = ERR_PTR(-ENOMEM);
+ goto err_out;
+ }
+ fence = &cf->base;
+ }
+
+ for_each_tile(tile, vm->xe, id) {
+ if (!vops->pt_update_ops[id].num_ops)
+ continue;
+
+ xe_pt_update_ops_fini(tile, vops);
+ }
+
+ return fence;
+
+err_out:
+ for_each_tile(tile, vm->xe, id) {
+ if (!vops->pt_update_ops[id].num_ops)
+ continue;
+
+ xe_pt_update_ops_abort(tile, vops);
+ }
+ while (current_fence)
+ dma_fence_put(fences[--current_fence]);
+ kfree(fences);
+ kfree(cf);
+
+err_trace:
+ trace_xe_vm_ops_fail(vm);
return fence;
}
@@ -2888,12 +2717,10 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
fence = ops_execute(vm, vops);
if (IS_ERR(fence)) {
err = PTR_ERR(fence);
- /* FIXME: Killing VM rather than proper error handling */
- xe_vm_kill(vm, false);
goto unlock;
- } else {
- vm_bind_ioctl_ops_fini(vm, vops, fence);
}
+
+ vm_bind_ioctl_ops_fini(vm, vops, fence);
}
unlock:
@@ -2901,11 +2728,18 @@ unlock:
return err;
}
-#define SUPPORTED_FLAGS \
+#define SUPPORTED_FLAGS_STUB \
(DRM_XE_VM_BIND_FLAG_READONLY | \
DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
DRM_XE_VM_BIND_FLAG_NULL | \
DRM_XE_VM_BIND_FLAG_DUMPABLE)
+
+#ifdef TEST_VM_OPS_ERROR
+#define SUPPORTED_FLAGS (SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR)
+#else
+#define SUPPORTED_FLAGS SUPPORTED_FLAGS_STUB
+#endif
+
#define XE_64K_PAGE_MASK 0xffffull
#define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
@@ -2931,7 +2765,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
sizeof(struct drm_xe_vm_bind_op),
GFP_KERNEL | __GFP_ACCOUNT);
if (!*bind_ops)
- return -ENOMEM;
+ return args->num_binds > 1 ? -ENOBUFS : -ENOMEM;
err = __copy_from_user(*bind_ops, bind_user,
sizeof(struct drm_xe_vm_bind_op) *
@@ -3250,10 +3084,18 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto unwind_ops;
}
- err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
- &vops, i == args->num_binds - 1);
+ err = vm_bind_ioctl_ops_parse(vm, ops[i], &vops);
if (err)
goto unwind_ops;
+
+#ifdef TEST_VM_OPS_ERROR
+ if (flags & FORCE_OP_ERROR) {
+ vops.inject_error = true;
+ vm->xe->vm_inject_error_position =
+ (vm->xe->vm_inject_error_position + 1) %
+ FORCE_OP_ERROR_COUNT;
+ }
+#endif
}
/* Nothing to do */
@@ -3262,11 +3104,16 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto unwind_ops;
}
+ err = xe_vma_ops_alloc(&vops, args->num_binds > 1);
+ if (err)
+ goto unwind_ops;
+
err = vm_bind_ioctl_ops_execute(vm, &vops);
unwind_ops:
if (err && err != -ENODATA)
vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
+ xe_vma_ops_fini(&vops);
for (i = args->num_binds - 1; i >= 0; --i)
if (ops[i])
drm_gpuva_ops_free(&vm->gpuvm, ops[i]);
@@ -3337,10 +3184,10 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
{
struct xe_device *xe = xe_vma_vm(vma)->xe;
struct xe_tile *tile;
+ struct xe_gt_tlb_invalidation_fence fence[XE_MAX_TILES_PER_DEVICE];
u32 tile_needs_invalidate = 0;
- int seqno[XE_MAX_TILES_PER_DEVICE];
u8 id;
- int ret;
+ int ret = 0;
xe_assert(xe, !xe_vma_is_null(vma));
trace_xe_vma_invalidate(vma);
@@ -3365,29 +3212,33 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
for_each_tile(tile, xe, id) {
if (xe_pt_zap_ptes(tile, vma)) {
- tile_needs_invalidate |= BIT(id);
xe_device_wmb(xe);
+ xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
+ &fence[id], true);
+
/*
* FIXME: We potentially need to invalidate multiple
* GTs within the tile
*/
- seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
- if (seqno[id] < 0)
- return seqno[id];
- }
- }
+ ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
+ &fence[id], vma);
+ if (ret < 0) {
+ xe_gt_tlb_invalidation_fence_fini(&fence[id]);
+ goto wait;
+ }
- for_each_tile(tile, xe, id) {
- if (tile_needs_invalidate & BIT(id)) {
- ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
- if (ret < 0)
- return ret;
+ tile_needs_invalidate |= BIT(id);
}
}
+wait:
+ for_each_tile(tile, xe, id)
+ if (tile_needs_invalidate & BIT(id))
+ xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+
vma->tile_invalidated = vma->tile_mask;
- return 0;
+ return ret;
}
struct xe_vm_snapshot {
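
Condensed, for orientation only, the bind ioctl now follows a parse, allocate, execute, fini sequence built from the hunks above (error unwinding abbreviated, names as in this series):

        for (i = 0; i < args->num_binds; ++i) {
                err = vm_bind_ioctl_ops_parse(vm, ops[i], &vops);
                if (err)
                        goto unwind_ops;
        }

        err = xe_vma_ops_alloc(&vops, args->num_binds > 1);
        if (err)
                goto unwind_ops;

        err = vm_bind_ioctl_ops_execute(vm, &vops);     /* lock, prepare, run, fini */

unwind_ops:
        if (err && err != -ENODATA)
                vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
        xe_vma_ops_fini(&vops);
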
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index b481608b12f1..c864dba35e1d 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -259,6 +259,8 @@ static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
return drm_gpuvm_resv(&vm->gpuvm);
}
+void xe_vm_kill(struct xe_vm *vm, bool unlocked);
+
/**
* xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
* @vm: The vm
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index ce1a63a5e3e7..7f9a303e51d8 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -21,18 +21,27 @@ struct xe_bo;
struct xe_sync_entry;
struct xe_user_fence;
struct xe_vm;
+struct xe_vm_pgtable_update_op;
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+#define TEST_VM_OPS_ERROR
+#define FORCE_OP_ERROR BIT(31)
+
+#define FORCE_OP_ERROR_LOCK 0
+#define FORCE_OP_ERROR_PREPARE 1
+#define FORCE_OP_ERROR_RUN 2
+#define FORCE_OP_ERROR_COUNT 3
+#endif
#define XE_VMA_READ_ONLY DRM_GPUVA_USERBITS
#define XE_VMA_DESTROYED (DRM_GPUVA_USERBITS << 1)
#define XE_VMA_ATOMIC_PTE_BIT (DRM_GPUVA_USERBITS << 2)
-#define XE_VMA_FIRST_REBIND (DRM_GPUVA_USERBITS << 3)
-#define XE_VMA_LAST_REBIND (DRM_GPUVA_USERBITS << 4)
-#define XE_VMA_PTE_4K (DRM_GPUVA_USERBITS << 5)
-#define XE_VMA_PTE_2M (DRM_GPUVA_USERBITS << 6)
-#define XE_VMA_PTE_1G (DRM_GPUVA_USERBITS << 7)
-#define XE_VMA_PTE_64K (DRM_GPUVA_USERBITS << 8)
-#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 9)
-#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 10)
+#define XE_VMA_PTE_4K (DRM_GPUVA_USERBITS << 3)
+#define XE_VMA_PTE_2M (DRM_GPUVA_USERBITS << 4)
+#define XE_VMA_PTE_1G (DRM_GPUVA_USERBITS << 5)
+#define XE_VMA_PTE_64K (DRM_GPUVA_USERBITS << 6)
+#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 7)
+#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 8)
/** struct xe_userptr - User pointer */
struct xe_userptr {
@@ -99,6 +108,9 @@ struct xe_vma {
*/
u8 tile_present;
+ /** @tile_staged: bind is staged for this VMA */
+ u8 tile_staged;
+
/**
* @pat_index: The pat index to use when encoding the PTEs for this vma.
*/
@@ -314,31 +326,18 @@ struct xe_vma_op_prefetch {
/** enum xe_vma_op_flags - flags for VMA operation */
enum xe_vma_op_flags {
- /** @XE_VMA_OP_FIRST: first VMA operation for a set of syncs */
- XE_VMA_OP_FIRST = BIT(0),
- /** @XE_VMA_OP_LAST: last VMA operation for a set of syncs */
- XE_VMA_OP_LAST = BIT(1),
/** @XE_VMA_OP_COMMITTED: VMA operation committed */
- XE_VMA_OP_COMMITTED = BIT(2),
+ XE_VMA_OP_COMMITTED = BIT(0),
/** @XE_VMA_OP_PREV_COMMITTED: Previous VMA operation committed */
- XE_VMA_OP_PREV_COMMITTED = BIT(3),
+ XE_VMA_OP_PREV_COMMITTED = BIT(1),
/** @XE_VMA_OP_NEXT_COMMITTED: Next VMA operation committed */
- XE_VMA_OP_NEXT_COMMITTED = BIT(4),
+ XE_VMA_OP_NEXT_COMMITTED = BIT(2),
};
/** struct xe_vma_op - VMA operation */
struct xe_vma_op {
/** @base: GPUVA base operation */
struct drm_gpuva_op base;
- /** @q: exec queue for this operation */
- struct xe_exec_queue *q;
- /**
- * @syncs: syncs for this operation, only used on first and last
- * operation
- */
- struct xe_sync_entry *syncs;
- /** @num_syncs: number of syncs */
- u32 num_syncs;
/** @link: async operation link */
struct list_head link;
/** @flags: operation flags */
@@ -362,12 +361,18 @@ struct xe_vma_ops {
struct list_head list;
/** @vm: VM */
struct xe_vm *vm;
- /** @q: exec queue these operations */
+ /** @q: exec queue for VMA operations */
struct xe_exec_queue *q;
/** @syncs: syncs these operation */
struct xe_sync_entry *syncs;
/** @num_syncs: number of syncs */
u32 num_syncs;
+ /** @pt_update_ops: page table update operations */
+ struct xe_vm_pgtable_update_ops pt_update_ops[XE_MAX_TILES_PER_DEVICE];
+#ifdef TEST_VM_OPS_ERROR
+ /** @inject_error: inject error to test error handling */
+ bool inject_error;
+#endif
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index c7bf0862b231..564e32e44e3b 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -486,6 +486,10 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, SLM_WMTP_RESTORE))
},
+ { XE_RTP_NAME("14021402888"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE))
+ },
/* Xe2_HPG */
@@ -539,6 +543,16 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE))
},
+ /* Xe2_LPM */
+
+ { XE_RTP_NAME("16021639441"),
+ XE_RTP_RULES(MEDIA_VERSION(2000)),
+ XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0),
+ GHWSP_CSB_REPORT_DIS |
+ PPHWSP_CSB_AND_TIMESTAMP_REPORT_DIS,
+ XE_RTP_ACTION_FLAG(ENGINE_BASE)))
+ },
+
/* Xe2_HPM */
{ XE_RTP_NAME("16021639441"),
@@ -741,6 +755,7 @@ void xe_wa_process_oob(struct xe_gt *gt)
xe_rtp_process_ctx_enable_active_tracking(&ctx, gt->wa_active.oob,
ARRAY_SIZE(oob_was));
+ gt->wa_active.oob_initialized = true;
xe_rtp_process(&ctx, oob_was);
}
diff --git a/drivers/gpu/drm/xe/xe_wa.h b/drivers/gpu/drm/xe/xe_wa.h
index db9ddeaf69bf..52337405b5bc 100644
--- a/drivers/gpu/drm/xe/xe_wa.h
+++ b/drivers/gpu/drm/xe/xe_wa.h
@@ -6,6 +6,8 @@
#ifndef _XE_WA_
#define _XE_WA_
+#include "xe_assert.h"
+
struct drm_printer;
struct xe_gt;
struct xe_hw_engine;
@@ -25,6 +27,9 @@ void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p);
* @gt__: gt instance
* @id__: XE_OOB_<id__>, as generated by build system in generated/xe_wa_oob.h
*/
-#define XE_WA(gt__, id__) test_bit(XE_WA_OOB_ ## id__, (gt__)->wa_active.oob)
+#define XE_WA(gt__, id__) ({ \
+ xe_gt_assert(gt__, (gt__)->wa_active.oob_initialized); \
+ test_bit(XE_WA_OOB_ ## id__, (gt__)->wa_active.oob); \
+})
#endif
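
XE_WA() now asserts that the out-of-band workaround table has been processed, so lookups are only valid after xe_wa_process_oob() has run for that GT. Illustrative usage (the WA number is taken from the rules file below; apply_oob_workaround() is a hypothetical helper):

        xe_wa_process_oob(gt);          /* marks gt->wa_active.oob_initialized */

        if (XE_WA(gt, 13011645652))
                apply_oob_workaround(gt);
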
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 26066beb4f6f..540d38603f32 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -29,3 +29,5 @@
13011645652 GRAPHICS_VERSION(2004)
22019338487 MEDIA_VERSION(2000)
GRAPHICS_VERSION(2001)
+22019338487_display PLATFORM(LUNARLAKE)
+16023588340 GRAPHICS_VERSION(2001)
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
index bdb578e0899f..4b59687ff5d8 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
@@ -288,12 +288,22 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
mp2_ops->start(privdata, info);
cl_data->sensor_sts[i] = amd_sfh_wait_for_response
(privdata, cl_data->sensor_idx[i], SENSOR_ENABLED);
+
+ if (cl_data->sensor_sts[i] == SENSOR_ENABLED)
+ cl_data->is_any_sensor_enabled = true;
+ }
+
+ if (!cl_data->is_any_sensor_enabled ||
+ (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0)) {
+ dev_warn(dev, "Failed to discover, sensors not enabled is %d\n",
+ cl_data->is_any_sensor_enabled);
+ rc = -EOPNOTSUPP;
+ goto cleanup;
}
for (i = 0; i < cl_data->num_hid_devices; i++) {
cl_data->cur_hid_dev = i;
if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
- cl_data->is_any_sensor_enabled = true;
rc = amdtp_hid_probe(i, cl_data);
if (rc)
goto cleanup;
@@ -305,12 +315,6 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
cl_data->sensor_sts[i]);
}
- if (!cl_data->is_any_sensor_enabled ||
- (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0)) {
- dev_warn(dev, "Failed to discover, sensors not enabled is %d\n", cl_data->is_any_sensor_enabled);
- rc = -EOPNOTSUPP;
- goto cleanup;
- }
schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
return 0;
diff --git a/drivers/hid/bpf/Kconfig b/drivers/hid/bpf/Kconfig
index 83214bae6768..d65482e02a6c 100644
--- a/drivers/hid/bpf/Kconfig
+++ b/drivers/hid/bpf/Kconfig
@@ -3,7 +3,7 @@ menu "HID-BPF support"
config HID_BPF
bool "HID-BPF support"
- depends on BPF
+ depends on BPF_JIT
depends on BPF_SYSCALL
depends on DYNAMIC_FTRACE_WITH_DIRECT_CALLS
help
diff --git a/drivers/hid/bpf/hid_bpf_struct_ops.c b/drivers/hid/bpf/hid_bpf_struct_ops.c
index f59cce6e437f..cd696c59ba0f 100644
--- a/drivers/hid/bpf/hid_bpf_struct_ops.c
+++ b/drivers/hid/bpf/hid_bpf_struct_ops.c
@@ -183,6 +183,10 @@ static int hid_bpf_reg(void *kdata, struct bpf_link *link)
struct hid_device *hdev;
int count, err = 0;
+ /* prevent multiple attach of the same struct_ops */
+ if (ops->hdev)
+ return -EINVAL;
+
hdev = hid_get_device(ops->hid_id);
if (IS_ERR(hdev))
return PTR_ERR(hdev);
@@ -248,6 +252,7 @@ static void hid_bpf_unreg(void *kdata, struct bpf_link *link)
list_del_rcu(&ops->list);
synchronize_srcu(&hdev->bpf.srcu);
+ ops->hdev = NULL;
reconnect = hdev->bpf.rdesc_ops == ops;
if (reconnect)
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index a44367aef621..1f4564982b95 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -692,78 +692,28 @@ static bool wacom_is_art_pen(int tool_id)
static int wacom_intuos_get_tool_type(int tool_id)
{
- int tool_type = BTN_TOOL_PEN;
-
- if (wacom_is_art_pen(tool_id))
- return tool_type;
-
switch (tool_id) {
case 0x812: /* Inking pen */
case 0x801: /* Intuos3 Inking pen */
case 0x12802: /* Intuos4/5 Inking Pen */
case 0x012:
- tool_type = BTN_TOOL_PENCIL;
- break;
-
- case 0x822: /* Pen */
- case 0x842:
- case 0x852:
- case 0x823: /* Intuos3 Grip Pen */
- case 0x813: /* Intuos3 Classic Pen */
- case 0x802: /* Intuos4/5 13HD/24HD General Pen */
- case 0x8e2: /* IntuosHT2 pen */
- case 0x022:
- case 0x200: /* Pro Pen 3 */
- case 0x04200: /* Pro Pen 3 */
- case 0x10842: /* MobileStudio Pro Pro Pen slim */
- case 0x14802: /* Intuos4/5 13HD/24HD Classic Pen */
- case 0x16802: /* Cintiq 13HD Pro Pen */
- case 0x18802: /* DTH2242 Pen */
- case 0x10802: /* Intuos4/5 13HD/24HD General Pen */
- case 0x80842: /* Intuos Pro and Cintiq Pro 3D Pen */
- tool_type = BTN_TOOL_PEN;
- break;
+ return BTN_TOOL_PENCIL;
case 0x832: /* Stroke pen */
case 0x032:
- tool_type = BTN_TOOL_BRUSH;
- break;
+ return BTN_TOOL_BRUSH;
case 0x007: /* Mouse 4D and 2D */
case 0x09c:
case 0x094:
case 0x017: /* Intuos3 2D Mouse */
case 0x806: /* Intuos4 Mouse */
- tool_type = BTN_TOOL_MOUSE;
- break;
+ return BTN_TOOL_MOUSE;
case 0x096: /* Lens cursor */
case 0x097: /* Intuos3 Lens cursor */
case 0x006: /* Intuos4 Lens cursor */
- tool_type = BTN_TOOL_LENS;
- break;
-
- case 0x82a: /* Eraser */
- case 0x84a:
- case 0x85a:
- case 0x91a:
- case 0xd1a:
- case 0x0fa:
- case 0x82b: /* Intuos3 Grip Pen Eraser */
- case 0x81b: /* Intuos3 Classic Pen Eraser */
- case 0x91b: /* Intuos3 Airbrush Eraser */
- case 0x80c: /* Intuos4/5 13HD/24HD Marker Pen Eraser */
- case 0x80a: /* Intuos4/5 13HD/24HD General Pen Eraser */
- case 0x90a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
- case 0x1480a: /* Intuos4/5 13HD/24HD Classic Pen Eraser */
- case 0x1090a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
- case 0x1080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
- case 0x1084a: /* MobileStudio Pro Pro Pen slim Eraser */
- case 0x1680a: /* Cintiq 13HD Pro Pen Eraser */
- case 0x1880a: /* DTH2242 Eraser */
- case 0x1080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
- tool_type = BTN_TOOL_RUBBER;
- break;
+ return BTN_TOOL_LENS;
case 0xd12:
case 0x912:
@@ -771,10 +721,13 @@ static int wacom_intuos_get_tool_type(int tool_id)
case 0x913: /* Intuos3 Airbrush */
case 0x902: /* Intuos4/5 13HD/24HD Airbrush */
case 0x10902: /* Intuos4/5 13HD/24HD Airbrush */
- tool_type = BTN_TOOL_AIRBRUSH;
- break;
+ return BTN_TOOL_AIRBRUSH;
+
+ default:
+ if (tool_id & 0x0008)
+ return BTN_TOOL_RUBBER;
+ return BTN_TOOL_PEN;
}
- return tool_type;
}
static void wacom_exit_report(struct wacom_wac *wacom)
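
The rewritten default case relies on bit 3 of the tool ID separating erasers from pens, which holds for every ID dropped from the explicit lists above. A spot-check, not part of the patch:

        BUILD_BUG_ON(!(0x82a & 0x0008));        /* Eraser */
        BUILD_BUG_ON(!(0x1880a & 0x0008));      /* DTH2242 Eraser */
        BUILD_BUG_ON(0x822 & 0x0008);           /* Pen */
        BUILD_BUG_ON(0x802 & 0x0008);           /* Intuos4/5 General Pen */
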
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index 9c452bfbd571..7a35c82976e0 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -207,13 +207,13 @@ static int hv_die_panic_notify_crash(struct notifier_block *self,
* buffer and call into Hyper-V to transfer the data.
*/
static void hv_kmsg_dump(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason)
+ struct kmsg_dump_detail *detail)
{
struct kmsg_dump_iter iter;
size_t bytes_written;
/* We are only interested in panics. */
- if (reason != KMSG_DUMP_PANIC || !sysctl_record_panic_msg)
+ if (detail->reason != KMSG_DUMP_PANIC || !sysctl_record_panic_msg)
return;
/*
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index bc186c61a2c0..382a2bb9168a 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -22,23 +22,23 @@
#include <linux/util_macros.h>
/* Indexes for the sysfs hooks */
-
-#define INPUT 0
-#define MIN 1
-#define MAX 2
-#define CONTROL 3
-#define OFFSET 3
-#define AUTOMIN 4
-#define THERM 5
-#define HYSTERSIS 6
-
+enum adt_sysfs_id {
+ INPUT = 0,
+ MIN = 1,
+ MAX = 2,
+ CONTROL = 3,
+ OFFSET = 3, // Dup
+ AUTOMIN = 4,
+ THERM = 5,
+ HYSTERSIS = 6,
/*
* These are unique identifiers for the sysfs functions - unlike the
* numbers above, these are not also indexes into an array
*/
+ ALARM = 9,
+ FAULT = 10,
+};
-#define ALARM 9
-#define FAULT 10
/* 7475 Common Registers */
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index 14b53dac1253..6b04a674f832 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -46,6 +46,9 @@ int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots,
return 0;
if (mt)
return mt->num_slots != num_slots ? -EINVAL : 0;
+ /* Arbitrary limit to avoid an excessively large memory allocation. */
+ if (num_slots > 1024)
+ return -EINVAL;
mt = kzalloc(struct_size(mt, slots, num_slots), GFP_KERNEL);
if (!mt)
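The new bound above keeps a caller-controlled slot count from turning the struct_size() allocation into an arbitrarily large kzalloc(). A hedged sketch of the same pattern with an invented structure (not input-mt's):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_table {
	unsigned int num_slots;
	u32 slots[];			/* flexible array sized at allocation */
};

static struct example_table *example_alloc(unsigned int num_slots)
{
	struct example_table *t;

	/* arbitrary sanity limit, mirroring the 1024-slot cap above */
	if (num_slots > 1024)
		return NULL;

	t = kzalloc(struct_size(t, slots, num_slots), GFP_KERNEL);
	if (t)
		t->num_slots = num_slots;
	return t;
}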
diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c
index 7cb26929dc73..9dc25eb2be44 100644
--- a/drivers/input/touchscreen/cyttsp4_core.c
+++ b/drivers/input/touchscreen/cyttsp4_core.c
@@ -871,7 +871,7 @@ static void cyttsp4_get_mt_touches(struct cyttsp4_mt_data *md, int num_cur_tch)
struct cyttsp4_touch tch;
int sig;
int i, j, t = 0;
- int ids[max(CY_TMA1036_MAX_TCH, CY_TMA4XX_MAX_TCH)];
+ int ids[MAX(CY_TMA1036_MAX_TCH, CY_TMA4XX_MAX_TCH)];
memset(ids, 0, si->si_ofs.tch_abs[CY_TCH_T].max * sizeof(int));
for (i = 0; i < num_cur_tch; i++) {
diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c
index 9d8f2c406043..b35903a06902 100644
--- a/drivers/irqchip/irq-loongarch-cpu.c
+++ b/drivers/irqchip/irq-loongarch-cpu.c
@@ -18,11 +18,13 @@ struct fwnode_handle *cpuintc_handle;
static u32 lpic_gsi_to_irq(u32 gsi)
{
+ int irq = 0;
+
/* Only pch irqdomain transferring is required for LoongArch. */
if (gsi >= GSI_MIN_PCH_IRQ && gsi <= GSI_MAX_PCH_IRQ)
- return acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);
+ irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);
- return 0;
+ return (irq > 0) ? irq : 0;
}
static struct fwnode_handle *lpic_get_gsi_domain_id(u32 gsi)
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 093fd42893a7..53cc08387588 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -64,6 +64,20 @@ struct mbigen_device {
void __iomem *base;
};
+static inline unsigned int get_mbigen_node_offset(unsigned int nid)
+{
+ unsigned int offset = nid * MBIGEN_NODE_OFFSET;
+
+ /*
+ * To avoid touching the clear registers unexpectedly, we need to skip
+ * over the clear register region when accessing more than 10 mbigen nodes.
+ */
+ if (nid >= (REG_MBIGEN_CLEAR_OFFSET / MBIGEN_NODE_OFFSET))
+ offset += MBIGEN_NODE_OFFSET;
+
+ return offset;
+}
+
static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq)
{
unsigned int nid, pin;
@@ -72,8 +86,7 @@ static inline unsigned int get_mbigen_vec_reg(irq_hw_number_t hwirq)
nid = hwirq / IRQS_PER_MBIGEN_NODE + 1;
pin = hwirq % IRQS_PER_MBIGEN_NODE;
- return pin * 4 + nid * MBIGEN_NODE_OFFSET
- + REG_MBIGEN_VEC_OFFSET;
+ return pin * 4 + get_mbigen_node_offset(nid) + REG_MBIGEN_VEC_OFFSET;
}
static inline void get_mbigen_type_reg(irq_hw_number_t hwirq,
@@ -88,8 +101,7 @@ static inline void get_mbigen_type_reg(irq_hw_number_t hwirq,
*mask = 1 << (irq_ofst % 32);
ofst = irq_ofst / 32 * 4;
- *addr = ofst + nid * MBIGEN_NODE_OFFSET
- + REG_MBIGEN_TYPE_OFFSET;
+ *addr = ofst + get_mbigen_node_offset(nid) + REG_MBIGEN_TYPE_OFFSET;
}
static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq,
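A userspace sketch of the layout fix above: nodes at or past the clear-register window get shifted by one extra node stride so their vector/type registers never alias the clear registers. The stride and clear-window constants below are assumptions made for the illustration, not values taken from the driver:

#include <stdio.h>

#define NODE_STRIDE	0x1000
#define CLEAR_REG_BASE	0xa000	/* region assumed reserved for the clear registers */

static unsigned int node_offset(unsigned int nid)
{
	unsigned int offset = nid * NODE_STRIDE;

	if (nid >= CLEAR_REG_BASE / NODE_STRIDE)
		offset += NODE_STRIDE;	/* hop over the clear-register window */

	return offset;
}

int main(void)
{
	printf("node 9  -> 0x%x\n", node_offset(9));	/* 0x9000 */
	printf("node 10 -> 0x%x\n", node_offset(10));	/* 0xb000, skipping 0xa000 */
	return 0;
}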
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index 27e30ce41db3..cd789fa51519 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -178,7 +178,7 @@ struct meson_gpio_irq_controller {
void __iomem *base;
u32 channel_irqs[MAX_NUM_CHANNEL];
DECLARE_BITMAP(channel_map, MAX_NUM_CHANNEL);
- spinlock_t lock;
+ raw_spinlock_t lock;
};
static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,
@@ -187,14 +187,14 @@ static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,
unsigned long flags;
u32 tmp;
- spin_lock_irqsave(&ctl->lock, flags);
+ raw_spin_lock_irqsave(&ctl->lock, flags);
tmp = readl_relaxed(ctl->base + reg);
tmp &= ~mask;
tmp |= val;
writel_relaxed(tmp, ctl->base + reg);
- spin_unlock_irqrestore(&ctl->lock, flags);
+ raw_spin_unlock_irqrestore(&ctl->lock, flags);
}
static void meson_gpio_irq_init_dummy(struct meson_gpio_irq_controller *ctl)
@@ -244,12 +244,12 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
unsigned long flags;
unsigned int idx;
- spin_lock_irqsave(&ctl->lock, flags);
+ raw_spin_lock_irqsave(&ctl->lock, flags);
/* Find a free channel */
idx = find_first_zero_bit(ctl->channel_map, ctl->params->nr_channels);
if (idx >= ctl->params->nr_channels) {
- spin_unlock_irqrestore(&ctl->lock, flags);
+ raw_spin_unlock_irqrestore(&ctl->lock, flags);
pr_err("No channel available\n");
return -ENOSPC;
}
@@ -257,7 +257,7 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
/* Mark the channel as used */
set_bit(idx, ctl->channel_map);
- spin_unlock_irqrestore(&ctl->lock, flags);
+ raw_spin_unlock_irqrestore(&ctl->lock, flags);
/*
* Setup the mux of the channel to route the signal of the pad
@@ -567,7 +567,7 @@ static int meson_gpio_irq_of_init(struct device_node *node, struct device_node *
if (!ctl)
return -ENOMEM;
- spin_lock_init(&ctl->lock);
+ raw_spin_lock_init(&ctl->lock);
ctl->base = of_iomap(node, 0);
if (!ctl->base) {
diff --git a/drivers/irqchip/irq-pic32-evic.c b/drivers/irqchip/irq-pic32-evic.c
index 5d6b8e025bb8..eb6ca516a166 100644
--- a/drivers/irqchip/irq-pic32-evic.c
+++ b/drivers/irqchip/irq-pic32-evic.c
@@ -161,9 +161,9 @@ static int pic32_irq_domain_map(struct irq_domain *d, unsigned int virq,
return ret;
}
-int pic32_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_type)
+static int pic32_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
struct evic_chip_data *priv = d->host_data;
diff --git a/drivers/irqchip/irq-sun6i-r.c b/drivers/irqchip/irq-sun6i-r.c
index a01e44049415..99958d470d62 100644
--- a/drivers/irqchip/irq-sun6i-r.c
+++ b/drivers/irqchip/irq-sun6i-r.c
@@ -270,7 +270,7 @@ static const struct irq_domain_ops sun6i_r_intc_domain_ops = {
static int sun6i_r_intc_suspend(void)
{
- u32 buf[BITS_TO_U32(max(SUN6I_NR_TOP_LEVEL_IRQS, SUN6I_NR_MUX_BITS))];
+ u32 buf[BITS_TO_U32(MAX(SUN6I_NR_TOP_LEVEL_IRQS, SUN6I_NR_MUX_BITS))];
int i;
/* Wake IRQs are enabled during system sleep and shutdown. */
diff --git a/drivers/media/dvb-frontends/stv0367_priv.h b/drivers/media/dvb-frontends/stv0367_priv.h
index 617f605947b2..7f056d1cce82 100644
--- a/drivers/media/dvb-frontends/stv0367_priv.h
+++ b/drivers/media/dvb-frontends/stv0367_priv.h
@@ -25,8 +25,11 @@
#endif
/* MACRO definitions */
+#ifndef MIN
#define MAX(X, Y) ((X) >= (Y) ? (X) : (Y))
#define MIN(X, Y) ((X) <= (Y) ? (X) : (Y))
+#endif
+
#define INRANGE(X, Y, Z) \
((((X) <= (Y)) && ((Y) <= (Z))) || \
(((Z) <= (Y)) && ((Y) <= (X))) ? 1 : 0)
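The MIN()/MAX() guard above, like the max() to MAX() conversions in the cyttsp4 and sun6i-r hunks earlier, relies on the uppercase form expanding to a plain conditional expression, which is valid wherever a constant array bound is required; the lowercase kernel max() does its type checking in a way that has not always been usable in such contexts. A small userspace illustration using the same MAX() shape:

#include <stdio.h>

#define MAX(X, Y) ((X) >= (Y) ? (X) : (Y))	/* same shape as the guarded macro above */

#define BUF_A 10
#define BUF_B 7

int main(void)
{
	char buf[MAX(BUF_A, BUF_B)];	/* constant bound, not a VLA */

	printf("sizeof(buf) = %zu\n", sizeof(buf));
	return 0;
}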
diff --git a/drivers/media/pci/intel/ipu6/Kconfig b/drivers/media/pci/intel/ipu6/Kconfig
index 154343080c82..40e20f0aa5ae 100644
--- a/drivers/media/pci/intel/ipu6/Kconfig
+++ b/drivers/media/pci/intel/ipu6/Kconfig
@@ -3,13 +3,14 @@ config VIDEO_INTEL_IPU6
depends on ACPI || COMPILE_TEST
depends on VIDEO_DEV
depends on X86 && X86_64 && HAS_DMA
+ depends on IPU_BRIDGE || !IPU_BRIDGE
+ select AUXILIARY_BUS
select DMA_OPS
select IOMMU_IOVA
select VIDEO_V4L2_SUBDEV_API
select MEDIA_CONTROLLER
select VIDEOBUF2_DMA_CONTIG
select V4L2_FWNODE
- select IPU_BRIDGE
help
This is the 6th Gen Intel Image Processing Unit, found in Intel SoCs
and used for capturing images and video from camera sensors.
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index 0136df5732ba..4fe26e82e3d1 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -2680,6 +2680,10 @@ static void uvc_ctrl_init_ctrl(struct uvc_video_chain *chain,
for (i = 0; i < ARRAY_SIZE(uvc_ctrl_mappings); ++i) {
const struct uvc_control_mapping *mapping = &uvc_ctrl_mappings[i];
+ if (!uvc_entity_match_guid(ctrl->entity, mapping->entity) ||
+ ctrl->info.selector != mapping->selector)
+ continue;
+
/* Let the device provide a custom mapping. */
if (mapping->filter_mapping) {
mapping = mapping->filter_mapping(chain, ctrl);
@@ -2687,9 +2691,7 @@ static void uvc_ctrl_init_ctrl(struct uvc_video_chain *chain,
continue;
}
- if (uvc_entity_match_guid(ctrl->entity, mapping->entity) &&
- ctrl->info.selector == mapping->selector)
- __uvc_ctrl_add_mapping(chain, ctrl, mapping);
+ __uvc_ctrl_add_mapping(chain, ctrl, mapping);
}
}
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 2f11585b5613..7bf3777e1f13 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -298,14 +298,14 @@ static void find_next_position(struct mtdoops_context *cxt)
}
static void mtdoops_do_dump(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason)
+ struct kmsg_dump_detail *detail)
{
struct mtdoops_context *cxt = container_of(dumper,
struct mtdoops_context, dump);
struct kmsg_dump_iter iter;
/* Only dump oopses if dump_oops is set */
- if (reason == KMSG_DUMP_OOPS && !dump_oops)
+ if (detail->reason == KMSG_DUMP_OOPS && !dump_oops)
return;
kmsg_dump_rewind(&iter);
@@ -317,7 +317,7 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
record_size - sizeof(struct mtdoops_hdr), NULL);
clear_bit(0, &cxt->oops_buf_busy);
- if (reason != KMSG_DUMP_OOPS) {
+ if (detail->reason != KMSG_DUMP_OOPS) {
/* Panics must be written immediately */
mtdoops_write(cxt, 1);
} else {
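Both kmsg dumper hunks above (hv_common.c and mtdoops.c) adapt to the dump callback now receiving a struct kmsg_dump_detail instead of a bare reason. A hedged sketch of a dumper written against that signature; only the ->reason member is assumed here, since that is all these hunks exercise:

#include <linux/kmsg_dump.h>
#include <linux/module.h>

static char example_line[256];	/* single panic-time consumer, so a static buffer is enough */

static void example_kmsg_dump(struct kmsg_dumper *dumper,
			      struct kmsg_dump_detail *detail)
{
	struct kmsg_dump_iter iter;
	size_t len;

	/* mirror the hunks above: only panics are of interest */
	if (detail->reason != KMSG_DUMP_PANIC)
		return;

	kmsg_dump_rewind(&iter);
	while (kmsg_dump_get_line(&iter, true, example_line,
				  sizeof(example_line), &len))
		; /* hand each line to the platform's panic-log backend here */
}

static struct kmsg_dumper example_dumper = {
	.dump = example_kmsg_dump,
};

static int __init example_dumper_init(void)
{
	return kmsg_dump_register(&example_dumper);
}
module_init(example_dumper_init);

MODULE_DESCRIPTION("kmsg_dump_detail callback sketch");
MODULE_LICENSE("GPL");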
diff --git a/drivers/net/can/usb/etas_es58x/es58x_devlink.c b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
index 635edeb8f68c..eee20839d96f 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_devlink.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
@@ -215,7 +215,7 @@ static int es58x_devlink_info_get(struct devlink *devlink,
struct es58x_sw_version *fw_ver = &es58x_dev->firmware_version;
struct es58x_sw_version *bl_ver = &es58x_dev->bootloader_version;
struct es58x_hw_revision *hw_rev = &es58x_dev->hardware_revision;
- char buf[max(sizeof("xx.xx.xx"), sizeof("axxx/xxx"))];
+ char buf[MAX(sizeof("xx.xx.xx"), sizeof("axxx/xxx"))];
int ret = 0;
if (es58x_sw_version_is_valid(fw_ver)) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index ffa74c26ee53..23f74c6c88b9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -7649,8 +7649,8 @@ static int bnxt_get_avail_msix(struct bnxt *bp, int num);
static int __bnxt_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_rings hwr = {0};
+ int rx_rings, old_rx_rings, rc;
int cp = bp->cp_nr_rings;
- int rx_rings, rc;
int ulp_msix = 0;
bool sh = false;
int tx_cp;
@@ -7684,6 +7684,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
hwr.grp = bp->rx_nr_rings;
hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
hwr.stat = bnxt_get_func_stat_ctxs(bp);
+ old_rx_rings = bp->hw_resc.resv_rx_rings;
rc = bnxt_hwrm_reserve_rings(bp, &hwr);
if (rc)
@@ -7738,7 +7739,8 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
if (!bnxt_rings_ok(bp, &hwr))
return -ENOMEM;
- if (!netif_is_rxfh_configured(bp->dev))
+ if (old_rx_rings != bp->hw_resc.resv_rx_rings &&
+ !netif_is_rxfh_configured(bp->dev))
bnxt_set_dflt_rss_indir_tbl(bp, NULL);
if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index d00ef0063820..ab8e3f197e7b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1863,8 +1863,14 @@ static void bnxt_modify_rss(struct bnxt *bp, struct ethtool_rxfh_context *ctx,
}
static int bnxt_rxfh_context_check(struct bnxt *bp,
+ const struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
+ if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) {
+ NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported");
+ return -EOPNOTSUPP;
+ }
+
if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) {
NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported");
return -EOPNOTSUPP;
@@ -1888,7 +1894,7 @@ static int bnxt_create_rxfh_context(struct net_device *dev,
struct bnxt_vnic_info *vnic;
int rc;
- rc = bnxt_rxfh_context_check(bp, extack);
+ rc = bnxt_rxfh_context_check(bp, rxfh, extack);
if (rc)
return rc;
@@ -1915,8 +1921,12 @@ static int bnxt_create_rxfh_context(struct net_device *dev,
if (rc)
goto out;
+ /* Populate defaults in the context */
bnxt_set_dflt_rss_indir_tbl(bp, ctx);
+ ctx->hfunc = ETH_RSS_HASH_TOP;
memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE);
+ memcpy(ethtool_rxfh_context_key(ctx),
+ bp->rss_hash_key, HW_HASH_KEY_SIZE);
rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings);
if (rc) {
@@ -1953,7 +1963,7 @@ static int bnxt_modify_rxfh_context(struct net_device *dev,
struct bnxt_rss_ctx *rss_ctx;
int rc;
- rc = bnxt_rxfh_context_check(bp, extack);
+ rc = bnxt_rxfh_context_check(bp, rxfh, extack);
if (rc)
return rc;
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 99a75a59078e..caaa10157909 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -765,18 +765,17 @@ static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
}
/**
- * ice_xsk_pool - get XSK buffer pool bound to a ring
+ * ice_rx_xsk_pool - assign XSK buff pool to Rx ring
* @ring: Rx ring to use
*
- * Returns a pointer to xsk_buff_pool structure if there is a buffer pool
- * present, NULL otherwise.
+ * Sets XSK buff pool pointer on Rx ring.
*/
-static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
+static inline void ice_rx_xsk_pool(struct ice_rx_ring *ring)
{
struct ice_vsi *vsi = ring->vsi;
u16 qid = ring->q_index;
- return ice_get_xp_from_qid(vsi, qid);
+ WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
}
/**
@@ -801,7 +800,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
if (!ring)
return;
- ring->xsk_pool = ice_get_xp_from_qid(vsi, qid);
+ WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 5d396c1a7731..1facf179a96f 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -536,7 +536,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
return err;
}
- ring->xsk_pool = ice_xsk_pool(ring);
+ ice_rx_xsk_pool(ring);
if (ring->xsk_pool) {
xdp_rxq_info_unreg(&ring->xdp_rxq);
@@ -597,7 +597,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
return 0;
}
- ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
+ ok = ice_alloc_rx_bufs_zc(ring, ring->xsk_pool, num_bufs);
if (!ok) {
u16 pf_q = ring->vsi->rxq_map[ring->q_index];
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index ec636be4d17d..3de020020bc4 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2948,7 +2948,7 @@ static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
ice_for_each_rxq(vsi, i) {
struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
- if (rx_ring->xsk_pool)
+ if (READ_ONCE(rx_ring->xsk_pool))
napi_schedule(&rx_ring->q_vector->napi);
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 8bb743f78fcb..8d25b6981269 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -456,7 +456,7 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
if (rx_ring->vsi->type == ICE_VSI_PF)
if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
- rx_ring->xdp_prog = NULL;
+ WRITE_ONCE(rx_ring->xdp_prog, NULL);
if (rx_ring->xsk_pool) {
kfree(rx_ring->xdp_buf);
rx_ring->xdp_buf = NULL;
@@ -1521,10 +1521,11 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
ice_for_each_tx_ring(tx_ring, q_vector->tx) {
+ struct xsk_buff_pool *xsk_pool = READ_ONCE(tx_ring->xsk_pool);
bool wd;
- if (tx_ring->xsk_pool)
- wd = ice_xmit_zc(tx_ring);
+ if (xsk_pool)
+ wd = ice_xmit_zc(tx_ring, xsk_pool);
else if (ice_ring_is_xdp(tx_ring))
wd = true;
else
@@ -1550,6 +1551,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
budget_per_ring = budget;
ice_for_each_rx_ring(rx_ring, q_vector->rx) {
+ struct xsk_buff_pool *xsk_pool = READ_ONCE(rx_ring->xsk_pool);
int cleaned;
/* A dedicated path for zero-copy allows making a single
@@ -1557,7 +1559,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* ice_clean_rx_irq function and makes the codebase cleaner.
*/
cleaned = rx_ring->xsk_pool ?
- ice_clean_rx_irq_zc(rx_ring, budget_per_ring) :
+ ice_clean_rx_irq_zc(rx_ring, xsk_pool, budget_per_ring) :
ice_clean_rx_irq(rx_ring, budget_per_ring);
work_done += cleaned;
/* if we clean as many as budgeted, we must not be done */
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index a65955eb23c0..240a7bec242b 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -52,10 +52,8 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
ice_clean_tx_ring(vsi->tx_rings[q_idx]);
- if (ice_is_xdp_ena_vsi(vsi)) {
- synchronize_rcu();
+ if (ice_is_xdp_ena_vsi(vsi))
ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
- }
ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}
@@ -112,25 +110,29 @@ ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
* ice_qvec_cfg_msix - Enable IRQ for given queue vector
* @vsi: the VSI that contains queue vector
* @q_vector: queue vector
+ * @qid: queue index
*/
static void
-ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid)
{
u16 reg_idx = q_vector->reg_idx;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- struct ice_tx_ring *tx_ring;
- struct ice_rx_ring *rx_ring;
+ int q, _qid = qid;
ice_cfg_itr(hw, q_vector);
- ice_for_each_tx_ring(tx_ring, q_vector->tx)
- ice_cfg_txq_interrupt(vsi, tx_ring->reg_idx, reg_idx,
- q_vector->tx.itr_idx);
+ for (q = 0; q < q_vector->num_ring_tx; q++) {
+ ice_cfg_txq_interrupt(vsi, _qid, reg_idx, q_vector->tx.itr_idx);
+ _qid++;
+ }
+
+ _qid = qid;
- ice_for_each_rx_ring(rx_ring, q_vector->rx)
- ice_cfg_rxq_interrupt(vsi, rx_ring->reg_idx, reg_idx,
- q_vector->rx.itr_idx);
+ for (q = 0; q < q_vector->num_ring_rx; q++) {
+ ice_cfg_rxq_interrupt(vsi, _qid, reg_idx, q_vector->rx.itr_idx);
+ _qid++;
+ }
ice_flush(hw);
}
@@ -164,6 +166,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
struct ice_tx_ring *tx_ring;
struct ice_rx_ring *rx_ring;
int timeout = 50;
+ int fail = 0;
int err;
if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
@@ -180,15 +183,17 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
usleep_range(1000, 2000);
}
+ synchronize_net();
+ netif_carrier_off(vsi->netdev);
+ netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+
ice_qvec_dis_irq(vsi, rx_ring, q_vector);
ice_qvec_toggle_napi(vsi, q_vector, false);
- netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
-
ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
- if (err)
- return err;
+ if (!fail)
+ fail = err;
if (ice_is_xdp_ena_vsi(vsi)) {
struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
@@ -196,17 +201,15 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
&txq_meta);
- if (err)
- return err;
+ if (!fail)
+ fail = err;
}
- err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
- if (err)
- return err;
+ ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false);
ice_qp_clean_rings(vsi, q_idx);
ice_qp_reset_stats(vsi, q_idx);
- return 0;
+ return fail;
}
/**
@@ -219,40 +222,48 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
struct ice_q_vector *q_vector;
+ int fail = 0;
+ bool link_up;
int err;
err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
- if (err)
- return err;
+ if (!fail)
+ fail = err;
if (ice_is_xdp_ena_vsi(vsi)) {
struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
- if (err)
- return err;
+ if (!fail)
+ fail = err;
ice_set_ring_xdp(xdp_ring);
ice_tx_xsk_pool(vsi, q_idx);
}
err = ice_vsi_cfg_single_rxq(vsi, q_idx);
- if (err)
- return err;
+ if (!fail)
+ fail = err;
q_vector = vsi->rx_rings[q_idx]->q_vector;
- ice_qvec_cfg_msix(vsi, q_vector);
+ ice_qvec_cfg_msix(vsi, q_vector, q_idx);
err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
- if (err)
- return err;
+ if (!fail)
+ fail = err;
ice_qvec_toggle_napi(vsi, q_vector, true);
ice_qvec_ena_irq(vsi, q_vector);
- netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+ /* make sure NAPI sees updated ice_{t,r}x_ring::xsk_pool */
+ synchronize_net();
+ ice_get_link_status(vsi->port_info, &link_up);
+ if (link_up) {
+ netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+ netif_carrier_on(vsi->netdev);
+ }
clear_bit(ICE_CFG_BUSY, vsi->state);
- return 0;
+ return fail;
}
/**
@@ -459,6 +470,7 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
/**
* __ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
* @rx_ring: Rx ring
+ * @xsk_pool: XSK buffer pool to pick buffers to be filled by HW
* @count: The number of buffers to allocate
*
* Place the @count of descriptors onto Rx ring. Handle the ring wrap
@@ -467,7 +479,8 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
*
* Returns true if all allocations were successful, false if any fail.
*/
-static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool, u16 count)
{
u32 nb_buffs_extra = 0, nb_buffs = 0;
union ice_32b_rx_flex_desc *rx_desc;
@@ -479,8 +492,7 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
xdp = ice_xdp_buf(rx_ring, ntu);
if (ntu + count >= rx_ring->count) {
- nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,
- rx_desc,
+ nb_buffs_extra = ice_fill_rx_descs(xsk_pool, xdp, rx_desc,
rx_ring->count - ntu);
if (nb_buffs_extra != rx_ring->count - ntu) {
ntu += nb_buffs_extra;
@@ -493,7 +505,7 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
ice_release_rx_desc(rx_ring, 0);
}
- nb_buffs = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, count);
+ nb_buffs = ice_fill_rx_descs(xsk_pool, xdp, rx_desc, count);
ntu += nb_buffs;
if (ntu == rx_ring->count)
@@ -509,6 +521,7 @@ exit:
/**
* ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
* @rx_ring: Rx ring
+ * @xsk_pool: XSK buffer pool to pick buffers to be filled by HW
* @count: The number of buffers to allocate
*
* Wrapper for internal allocation routine; figure out how many tail
@@ -516,7 +529,8 @@ exit:
*
* Returns true if all calls to internal alloc routine succeeded
*/
-bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool, u16 count)
{
u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
u16 leftover, i, tail_bumps;
@@ -525,9 +539,9 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
leftover = count - (tail_bumps * rx_thresh);
for (i = 0; i < tail_bumps; i++)
- if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
+ if (!__ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, rx_thresh))
return false;
- return __ice_alloc_rx_bufs_zc(rx_ring, leftover);
+ return __ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, leftover);
}
/**
@@ -596,8 +610,10 @@ out:
/**
* ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
* @xdp_ring: XDP Tx ring
+ * @xsk_pool: AF_XDP buffer pool pointer
*/
-static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
+static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring,
+ struct xsk_buff_pool *xsk_pool)
{
u16 ntc = xdp_ring->next_to_clean;
struct ice_tx_desc *tx_desc;
@@ -648,7 +664,7 @@ skip:
if (xdp_ring->next_to_clean >= cnt)
xdp_ring->next_to_clean -= cnt;
if (xsk_frames)
- xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
+ xsk_tx_completed(xsk_pool, xsk_frames);
return completed_frames;
}
@@ -657,6 +673,7 @@ skip:
* ice_xmit_xdp_tx_zc - AF_XDP ZC handler for XDP_TX
* @xdp: XDP buffer to xmit
* @xdp_ring: XDP ring to produce descriptor onto
+ * @xsk_pool: AF_XDP buffer pool pointer
*
* note that this function works directly on xdp_buff, no need to convert
* it to xdp_frame. xdp_buff pointer is stored to ice_tx_buf so that cleaning
@@ -666,7 +683,8 @@ skip:
* was not enough space on XDP ring
*/
static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
- struct ice_tx_ring *xdp_ring)
+ struct ice_tx_ring *xdp_ring,
+ struct xsk_buff_pool *xsk_pool)
{
struct skb_shared_info *sinfo = NULL;
u32 size = xdp->data_end - xdp->data;
@@ -680,7 +698,7 @@ static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
free_space = ICE_DESC_UNUSED(xdp_ring);
if (free_space < ICE_RING_QUARTER(xdp_ring))
- free_space += ice_clean_xdp_irq_zc(xdp_ring);
+ free_space += ice_clean_xdp_irq_zc(xdp_ring, xsk_pool);
if (unlikely(!free_space))
goto busy;
@@ -700,7 +718,7 @@ static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
dma_addr_t dma;
dma = xsk_buff_xdp_get_dma(xdp);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size);
+ xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, size);
tx_buf->xdp = xdp;
tx_buf->type = ICE_TX_BUF_XSK_TX;
@@ -742,12 +760,14 @@ busy:
* @xdp: xdp_buff used as input to the XDP program
* @xdp_prog: XDP program to run
* @xdp_ring: ring to be used for XDP_TX action
+ * @xsk_pool: AF_XDP buffer pool pointer
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
static int
ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
+ struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
+ struct xsk_buff_pool *xsk_pool)
{
int err, result = ICE_XDP_PASS;
u32 act;
@@ -758,7 +778,7 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
if (!err)
return ICE_XDP_REDIR;
- if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+ if (xsk_uses_need_wakeup(xsk_pool) && err == -ENOBUFS)
result = ICE_XDP_EXIT;
else
result = ICE_XDP_CONSUMED;
@@ -769,7 +789,7 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
case XDP_PASS:
break;
case XDP_TX:
- result = ice_xmit_xdp_tx_zc(xdp, xdp_ring);
+ result = ice_xmit_xdp_tx_zc(xdp, xdp_ring, xsk_pool);
if (result == ICE_XDP_CONSUMED)
goto out_failure;
break;
@@ -821,14 +841,16 @@ ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
/**
* ice_clean_rx_irq_zc - consumes packets from the hardware ring
* @rx_ring: AF_XDP Rx ring
+ * @xsk_pool: AF_XDP buffer pool pointer
* @budget: NAPI budget
*
* Returns number of processed packets on success, remaining budget on failure.
*/
-int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
+int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool,
+ int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
- struct xsk_buff_pool *xsk_pool = rx_ring->xsk_pool;
u32 ntc = rx_ring->next_to_clean;
u32 ntu = rx_ring->next_to_use;
struct xdp_buff *first = NULL;
@@ -891,7 +913,8 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
if (ice_is_non_eop(rx_ring, rx_desc))
continue;
- xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring);
+ xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring,
+ xsk_pool);
if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
xdp_xmit |= xdp_res;
} else if (xdp_res == ICE_XDP_EXIT) {
@@ -940,7 +963,8 @@ construct_skb:
rx_ring->next_to_clean = ntc;
entries_to_alloc = ICE_RX_DESC_UNUSED(rx_ring);
if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
- failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc);
+ failure |= !ice_alloc_rx_bufs_zc(rx_ring, xsk_pool,
+ entries_to_alloc);
ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
@@ -963,17 +987,19 @@ construct_skb:
/**
* ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor
* @xdp_ring: XDP ring to produce the HW Tx descriptor on
+ * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
* @desc: AF_XDP descriptor to pull the DMA address and length from
* @total_bytes: bytes accumulator that will be used for stats update
*/
-static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
+static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring,
+ struct xsk_buff_pool *xsk_pool, struct xdp_desc *desc,
unsigned int *total_bytes)
{
struct ice_tx_desc *tx_desc;
dma_addr_t dma;
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);
+ dma = xsk_buff_raw_get_dma(xsk_pool, desc->addr);
+ xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, desc->len);
tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
tx_desc->buf_addr = cpu_to_le64(dma);
@@ -986,10 +1012,13 @@ static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
/**
* ice_xmit_pkt_batch - produce a batch of HW Tx descriptors out of AF_XDP descriptors
* @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
* @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
* @total_bytes: bytes accumulator that will be used for stats update
*/
-static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
+static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring,
+ struct xsk_buff_pool *xsk_pool,
+ struct xdp_desc *descs,
unsigned int *total_bytes)
{
u16 ntu = xdp_ring->next_to_use;
@@ -999,8 +1028,8 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
dma_addr_t dma;
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, descs[i].addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, descs[i].len);
+ dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr);
+ xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, descs[i].len);
tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
tx_desc->buf_addr = cpu_to_le64(dma);
@@ -1016,60 +1045,69 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
/**
* ice_fill_tx_hw_ring - produce the number of Tx descriptors onto ring
* @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
* @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
* @nb_pkts: count of packets to be sent
* @total_bytes: bytes accumulator that will be used for stats update
*/
-static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
- u32 nb_pkts, unsigned int *total_bytes)
+static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring,
+ struct xsk_buff_pool *xsk_pool,
+ struct xdp_desc *descs, u32 nb_pkts,
+ unsigned int *total_bytes)
{
u32 batched, leftover, i;
batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
leftover = nb_pkts & (PKTS_PER_BATCH - 1);
for (i = 0; i < batched; i += PKTS_PER_BATCH)
- ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
+ ice_xmit_pkt_batch(xdp_ring, xsk_pool, &descs[i], total_bytes);
for (; i < batched + leftover; i++)
- ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);
+ ice_xmit_pkt(xdp_ring, xsk_pool, &descs[i], total_bytes);
}
/**
* ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
* @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @xsk_pool: AF_XDP buffer pool pointer
*
* Returns true if there is no more work that needs to be done, false otherwise
*/
-bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool)
{
- struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
+ struct xdp_desc *descs = xsk_pool->tx_descs;
u32 nb_pkts, nb_processed = 0;
unsigned int total_bytes = 0;
int budget;
- ice_clean_xdp_irq_zc(xdp_ring);
+ ice_clean_xdp_irq_zc(xdp_ring, xsk_pool);
+
+ if (!netif_carrier_ok(xdp_ring->vsi->netdev) ||
+ !netif_running(xdp_ring->vsi->netdev))
+ return true;
budget = ICE_DESC_UNUSED(xdp_ring);
budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));
- nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
+ nb_pkts = xsk_tx_peek_release_desc_batch(xsk_pool, budget);
if (!nb_pkts)
return true;
if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
nb_processed = xdp_ring->count - xdp_ring->next_to_use;
- ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
+ ice_fill_tx_hw_ring(xdp_ring, xsk_pool, descs, nb_processed,
+ &total_bytes);
xdp_ring->next_to_use = 0;
}
- ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
- &total_bytes);
+ ice_fill_tx_hw_ring(xdp_ring, xsk_pool, &descs[nb_processed],
+ nb_pkts - nb_processed, &total_bytes);
ice_set_rs_bit(xdp_ring);
ice_xdp_ring_update_tail(xdp_ring);
ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);
- if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
- xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);
+ if (xsk_uses_need_wakeup(xsk_pool))
+ xsk_set_tx_need_wakeup(xsk_pool);
return nb_pkts < budget;
}
@@ -1091,7 +1129,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
struct ice_vsi *vsi = np->vsi;
struct ice_tx_ring *ring;
- if (test_bit(ICE_VSI_DOWN, vsi->state))
+ if (test_bit(ICE_VSI_DOWN, vsi->state) || !netif_carrier_ok(netdev))
return -ENETDOWN;
if (!ice_is_xdp_ena_vsi(vsi))
@@ -1102,7 +1140,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
ring = vsi->rx_rings[queue_id]->xdp_ring;
- if (!ring->xsk_pool)
+ if (!READ_ONCE(ring->xsk_pool))
return -EINVAL;
/* The idea here is that if NAPI is running, mark a miss, so
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 6fa181f080ef..45adeb513253 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -20,16 +20,20 @@ struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
u16 qid);
-int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget);
+int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool,
+ int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
-bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
+bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
-bool ice_xmit_zc(struct ice_tx_ring *xdp_ring);
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool);
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
#else
-static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring)
+static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
+ struct xsk_buff_pool __always_unused *xsk_pool)
{
return false;
}
@@ -44,6 +48,7 @@ ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
static inline int
ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
+ struct xsk_buff_pool __always_unused *xsk_pool,
int __always_unused budget)
{
return 0;
@@ -51,6 +56,7 @@ ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
static inline bool
ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring,
+ struct xsk_buff_pool __always_unused *xsk_pool,
u16 __always_unused count)
{
return false;
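The ice changes above repeatedly replace implicit ring->xsk_pool dereferences with an explicit WRITE_ONCE()/READ_ONCE() hand-off: the configuration path publishes the pool pointer, and the NAPI path samples it once per poll and passes that snapshot to the helpers. A minimal sketch of the idea with illustrative structure names (not the driver's):

#include <linux/compiler.h>
#include <linux/types.h>

struct xsk_buff_pool;

struct example_ring {
	struct xsk_buff_pool *xsk_pool;
};

/* control path: publish (or clear) the pool pointer */
static void example_set_pool(struct example_ring *ring,
			     struct xsk_buff_pool *pool)
{
	WRITE_ONCE(ring->xsk_pool, pool);
}

/* datapath: sample the pointer once per poll and use that snapshot throughout */
static bool example_poll(struct example_ring *ring)
{
	struct xsk_buff_pool *pool = READ_ONCE(ring->xsk_pool);

	if (!pool)
		return false;

	/* ... pass "pool" explicitly to the clean/xmit helpers ... */
	return true;
}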
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index cb5c7b09e8a0..8daf938afc36 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -6306,21 +6306,6 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
size_t n;
int i;
- switch (qopt->cmd) {
- case TAPRIO_CMD_REPLACE:
- break;
- case TAPRIO_CMD_DESTROY:
- return igc_tsn_clear_schedule(adapter);
- case TAPRIO_CMD_STATS:
- igc_taprio_stats(adapter->netdev, &qopt->stats);
- return 0;
- case TAPRIO_CMD_QUEUE_STATS:
- igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-
if (qopt->base_time < 0)
return -ERANGE;
@@ -6429,7 +6414,23 @@ static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
if (hw->mac.type != igc_i225)
return -EOPNOTSUPP;
- err = igc_save_qbv_schedule(adapter, qopt);
+ switch (qopt->cmd) {
+ case TAPRIO_CMD_REPLACE:
+ err = igc_save_qbv_schedule(adapter, qopt);
+ break;
+ case TAPRIO_CMD_DESTROY:
+ err = igc_tsn_clear_schedule(adapter);
+ break;
+ case TAPRIO_CMD_STATS:
+ igc_taprio_stats(adapter->netdev, &qopt->stats);
+ return 0;
+ case TAPRIO_CMD_QUEUE_STATS:
+ igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
if (err)
return err;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 8c45ad983abc..0d62a33afa80 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -953,13 +953,13 @@ static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
{
struct mvpp2_port *port;
- int i;
+ int i, j;
for (i = 0; i < priv->port_count; i++) {
port = priv->port_list[i];
if (port->priv->percpu_pools) {
- for (i = 0; i < port->nrxqs; i++)
- mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i],
+ for (j = 0; j < port->nrxqs; j++)
+ mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
port->tx_fc & en);
} else {
mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 8cf8ba2622f2..71a168746ebe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -932,6 +932,7 @@ err_rule:
mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, mh);
mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
err_mod_hdr:
+ *attr = *old_attr;
kfree(old_attr);
err_attr:
kvfree(spec);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 6e00afe4671b..797db853de36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -51,9 +51,10 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
- if ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
- MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
- MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level))
+ if (IS_ENABLED(CONFIG_MLX5_CLS_ACT) &&
+ ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
+ MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level)))
caps |= MLX5_IPSEC_CAP_PRIO;
if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 00d5661dc62e..36845872ae94 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1409,7 +1409,12 @@ static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
if (!an_changes && link_modes == eproto.admin)
goto out;
- mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
+ err = mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
+ if (err) {
+ netdev_err(priv->netdev, "%s: failed to set ptys reg: %d\n", __func__, err);
+ goto out;
+ }
+
mlx5_toggle_port_link(mdev);
out:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 979c49ae6b5c..b43ca0b762c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -207,6 +207,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unloaded)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ struct devlink *devlink = priv_to_devlink(dev);
/* if this is the driver that initiated the fw reset, devlink completed the reload */
if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
@@ -218,9 +219,11 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unload
mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
else
mlx5_load_one(dev, true);
- devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
+ devl_lock(devlink);
+ devlink_remote_reload_actions_performed(devlink, 0,
BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
+ devl_unlock(devlink);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
index f7b01b3f0cba..1477db7f5307 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -48,6 +48,7 @@ static struct mlx5_irq *
irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
{
struct irq_affinity_desc auto_desc = {};
+ struct mlx5_irq *irq;
u32 irq_index;
int err;
@@ -64,9 +65,12 @@ irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_de
else
cpu_get(pool, cpumask_first(&af_desc->mask));
}
- return mlx5_irq_alloc(pool, irq_index,
- cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
- NULL);
+ irq = mlx5_irq_alloc(pool, irq_index,
+ cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
+ NULL);
+ if (IS_ERR(irq))
+ xa_erase(&pool->irqs, irq_index);
+ return irq;
}
/* Looking for the IRQ with the smallest refcount that fits req_mask.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index d0871c46b8c5..cf8045b92689 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -1538,7 +1538,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
goto unlock;
for (i = 0; i < ldev->ports; i++) {
- if (ldev->pf[MLX5_LAG_P1].netdev == slave) {
+ if (ldev->pf[i].netdev == slave) {
port = i;
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 527da58c7953..5b7e6f4b5c7e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -2142,7 +2142,6 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
/* Panic tear down fw command will stop the PCI bus communication
* with the HCA, so the health poll is no longer needed.
*/
- mlx5_drain_health_wq(dev);
mlx5_stop_health_poll(dev, false);
ret = mlx5_cmd_fast_teardown_hca(dev);
@@ -2177,6 +2176,7 @@ static void shutdown(struct pci_dev *pdev)
mlx5_core_info(dev, "Shutdown was called\n");
set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
+ mlx5_drain_health_wq(dev);
err = mlx5_try_fast_unload(dev);
if (err)
mlx5_unload_one(dev, false);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index b2986175d9af..b706f1486504 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -112,6 +112,7 @@ static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
struct mlx5_core_dev *mdev = sf_dev->mdev;
set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
+ mlx5_drain_health_wq(mdev);
mlx5_unload_one(mdev, false);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index 042ca0349124..d1db04baa1fa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -7,7 +7,7 @@
/* don't try to optimize STE allocation if the stack is too constraining */
#define DR_RULE_MAX_STES_OPTIMIZED 0
#else
-#define DR_RULE_MAX_STES_OPTIMIZED 5
+#define DR_RULE_MAX_STES_OPTIMIZED 2
#endif
#define DR_RULE_MAX_STE_CHAIN_OPTIMIZED (DR_RULE_MAX_STES_OPTIMIZED + DR_ACTION_MAX_STES)
diff --git a/drivers/net/ethernet/meta/Kconfig b/drivers/net/ethernet/meta/Kconfig
index 86034ea4ba5b..c002ede36402 100644
--- a/drivers/net/ethernet/meta/Kconfig
+++ b/drivers/net/ethernet/meta/Kconfig
@@ -20,7 +20,7 @@ if NET_VENDOR_META
config FBNIC
tristate "Meta Platforms Host Network Interface"
depends on X86_64 || COMPILE_TEST
- depends on S390=n
+ depends on !S390
depends on MAX_SKB_FRAGS < 22
depends on PCI_MSI
select PHYLINK
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 714d2e804694..3507c2e28110 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4349,7 +4349,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
if (unlikely(!rtl_tx_slots_avail(tp))) {
if (net_ratelimit())
netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
- goto err_stop_0;
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
}
opts[1] = rtl8169_tx_vlan_tag(skb);
@@ -4405,11 +4406,6 @@ err_dma_0:
dev_kfree_skb_any(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
-
-err_stop_0:
- netif_stop_queue(dev);
- dev->stats.tx_dropped++;
- return NETDEV_TX_BUSY;
}
static unsigned int rtl_last_frag_len(struct sk_buff *skb)
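The r8169 change above keeps the ring-full case from being counted as a drop: the queue is stopped and NETDEV_TX_BUSY is returned, so the core requeues the skb. A hedged sketch of that .ndo_start_xmit pattern with an invented private structure:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct example_priv {
	unsigned int tx_free;		/* descriptors still available */
};

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	if (unlikely(!priv->tx_free)) {
		/* stop the queue, then let the core requeue the skb;
		 * nothing is freed or counted as dropped here
		 */
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	priv->tx_free--;
	/* ... map the skb and post descriptors here ... */
	return NETDEV_TX_OK;
}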
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index e342f387c3dd..02fdf66e07fa 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -2219,9 +2219,9 @@ static void axienet_dma_err_handler(struct work_struct *work)
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
axienet_set_mac_address(ndev, NULL);
axienet_set_multicast_list(ndev);
- axienet_setoptions(ndev, lp->options);
napi_enable(&lp->napi_rx);
napi_enable(&lp->napi_tx);
+ axienet_setoptions(ndev, lp->options);
}
/**
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index b3ddc9a629d9..fad5b6564464 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -14,9 +14,7 @@
#include "fjes.h"
#include "fjes_trace.h"
-#define MAJ 1
-#define MIN 2
-#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
+#define DRV_VERSION "1.2"
#define DRV_NAME "fjes"
char fjes_driver_name[] = DRV_NAME;
char fjes_driver_version[] = DRV_VERSION;
diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c
index d12e35374231..e982e9ce44a5 100644
--- a/drivers/net/phy/aquantia/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -653,13 +653,7 @@ static int aqr107_fill_interface_modes(struct phy_device *phydev)
unsigned long *possible = phydev->possible_interfaces;
unsigned int serdes_mode, rate_adapt;
phy_interface_t interface;
- int i, val, ret;
-
- ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
- VEND1_GLOBAL_CFG_10M, val, val != 0,
- 1000, 100000, false);
- if (ret)
- return ret;
+ int i, val;
/* Walk the media-speed configuration registers to determine which
* host-side serdes modes may be used by the PHY depending on the
@@ -708,6 +702,25 @@ static int aqr107_fill_interface_modes(struct phy_device *phydev)
return 0;
}
+static int aqr113c_fill_interface_modes(struct phy_device *phydev)
+{
+ int val, ret;
+
+ /* It's been observed on some models that - when coming out of suspend
+ * - the FW signals that the PHY is ready but the GLOBAL_CFG registers
+ * continue on returning zeroes for some time. Let's poll the 100M
+ * register until it returns a real value as both 113c and 115c support
+ * this mode.
+ */
+ ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_CFG_100M, val, val != 0,
+ 1000, 100000, false);
+ if (ret)
+ return ret;
+
+ return aqr107_fill_interface_modes(phydev);
+}
+
static int aqr113c_config_init(struct phy_device *phydev)
{
int ret;
@@ -725,7 +738,7 @@ static int aqr113c_config_init(struct phy_device *phydev)
if (ret)
return ret;
- return aqr107_fill_interface_modes(phydev);
+ return aqr113c_fill_interface_modes(phydev);
}
static int aqr107_probe(struct phy_device *phydev)
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index dd519805deee..65b0a3115e14 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1389,6 +1389,8 @@ static int ksz9131_config_init(struct phy_device *phydev)
const struct device *dev_walker;
int ret;
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
dev_walker = &phydev->mdio.dev;
do {
of_node = dev_walker->of_node;
@@ -1438,28 +1440,30 @@ static int ksz9131_config_init(struct phy_device *phydev)
#define MII_KSZ9131_AUTO_MDIX 0x1C
#define MII_KSZ9131_AUTO_MDI_SET BIT(7)
#define MII_KSZ9131_AUTO_MDIX_SWAP_OFF BIT(6)
+#define MII_KSZ9131_DIG_AXAN_STS 0x14
+#define MII_KSZ9131_DIG_AXAN_STS_LINK_DET BIT(14)
+#define MII_KSZ9131_DIG_AXAN_STS_A_SELECT BIT(12)
static int ksz9131_mdix_update(struct phy_device *phydev)
{
int ret;
- ret = phy_read(phydev, MII_KSZ9131_AUTO_MDIX);
- if (ret < 0)
- return ret;
-
- if (ret & MII_KSZ9131_AUTO_MDIX_SWAP_OFF) {
- if (ret & MII_KSZ9131_AUTO_MDI_SET)
- phydev->mdix_ctrl = ETH_TP_MDI;
- else
- phydev->mdix_ctrl = ETH_TP_MDI_X;
+ if (phydev->mdix_ctrl != ETH_TP_MDI_AUTO) {
+ phydev->mdix = phydev->mdix_ctrl;
} else {
- phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
- }
+ ret = phy_read(phydev, MII_KSZ9131_DIG_AXAN_STS);
+ if (ret < 0)
+ return ret;
- if (ret & MII_KSZ9131_AUTO_MDI_SET)
- phydev->mdix = ETH_TP_MDI;
- else
- phydev->mdix = ETH_TP_MDI_X;
+ if (ret & MII_KSZ9131_DIG_AXAN_STS_LINK_DET) {
+ if (ret & MII_KSZ9131_DIG_AXAN_STS_A_SELECT)
+ phydev->mdix = ETH_TP_MDI;
+ else
+ phydev->mdix = ETH_TP_MDI_X;
+ } else {
+ phydev->mdix = ETH_TP_MDI_INVALID;
+ }
+ }
return 0;
}
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index bed839237fb5..87865918dab6 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -1465,6 +1465,13 @@ static struct phy_driver realtek_drvs[] = {
.handle_interrupt = genphy_handle_interrupt_no_ack,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ }, {
+ PHY_ID_MATCH_EXACT(0x001cc960),
+ .name = "RTL8366S Gigabit Ethernet",
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .read_mmd = genphy_read_mmd_unsupported,
+ .write_mmd = genphy_write_mmd_unsupported,
},
};
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 0a662e42ed96..cb7d2f798fb4 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -179,6 +179,7 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
struct usbnet *dev = netdev_priv(netdev);
__le16 res;
int rc = 0;
+ int err;
if (phy_id) {
netdev_dbg(netdev, "Only internal phy supported\n");
@@ -189,11 +190,17 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
if (loc == MII_BMSR) {
u8 value;
- sr_read_reg(dev, SR_NSR, &value);
+ err = sr_read_reg(dev, SR_NSR, &value);
+ if (err < 0)
+ return err;
+
if (value & NSR_LINKST)
rc = 1;
}
- sr_share_read_word(dev, 1, loc, &res);
+ err = sr_share_read_word(dev, 1, loc, &res);
+ if (err < 0)
+ return err;
+
if (rc == 1)
res = le16_to_cpu(res) | BMSR_LSTATUS;
else
diff --git a/drivers/net/wan/fsl_qmc_hdlc.c b/drivers/net/wan/fsl_qmc_hdlc.c
index c5e7ca793c43..8fcfbde31a1c 100644
--- a/drivers/net/wan/fsl_qmc_hdlc.c
+++ b/drivers/net/wan/fsl_qmc_hdlc.c
@@ -18,6 +18,7 @@
#include <linux/hdlc.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -37,7 +38,7 @@ struct qmc_hdlc {
struct qmc_chan *qmc_chan;
struct net_device *netdev;
struct framer *framer;
- spinlock_t carrier_lock; /* Protect carrier detection */
+ struct mutex carrier_lock; /* Protect carrier detection */
struct notifier_block nb;
bool is_crc32;
spinlock_t tx_lock; /* Protect tx descriptors */
@@ -60,7 +61,7 @@ static int qmc_hdlc_framer_set_carrier(struct qmc_hdlc *qmc_hdlc)
if (!qmc_hdlc->framer)
return 0;
- guard(spinlock_irqsave)(&qmc_hdlc->carrier_lock);
+ guard(mutex)(&qmc_hdlc->carrier_lock);
ret = framer_get_status(qmc_hdlc->framer, &framer_status);
if (ret) {
@@ -249,6 +250,7 @@ static void qmc_hcld_recv_complete(void *context, size_t length, unsigned int fl
struct qmc_hdlc_desc *desc = context;
struct net_device *netdev;
struct qmc_hdlc *qmc_hdlc;
+ size_t crc_size;
int ret;
netdev = desc->netdev;
@@ -267,15 +269,26 @@ static void qmc_hcld_recv_complete(void *context, size_t length, unsigned int fl
if (flags & QMC_RX_FLAG_HDLC_CRC) /* CRC error */
netdev->stats.rx_crc_errors++;
kfree_skb(desc->skb);
- } else {
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += length;
+ goto re_queue;
+ }
- skb_put(desc->skb, length);
- desc->skb->protocol = hdlc_type_trans(desc->skb, netdev);
- netif_rx(desc->skb);
+ /* Discard the CRC */
+ crc_size = qmc_hdlc->is_crc32 ? 4 : 2;
+ if (length < crc_size) {
+ netdev->stats.rx_length_errors++;
+ kfree_skb(desc->skb);
+ goto re_queue;
}
+ length -= crc_size;
+
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += length;
+
+ skb_put(desc->skb, length);
+ desc->skb->protocol = hdlc_type_trans(desc->skb, netdev);
+ netif_rx(desc->skb);
+re_queue:
/* Re-queue a transfer using the same descriptor */
ret = qmc_hdlc_recv_queue(qmc_hdlc, desc, desc->dma_size);
if (ret) {
@@ -706,7 +719,7 @@ static int qmc_hdlc_probe(struct platform_device *pdev)
qmc_hdlc->dev = dev;
spin_lock_init(&qmc_hdlc->tx_lock);
- spin_lock_init(&qmc_hdlc->carrier_lock);
+ mutex_init(&qmc_hdlc->carrier_lock);
qmc_hdlc->qmc_chan = devm_qmc_chan_get_bychild(dev, dev->of_node);
if (IS_ERR(qmc_hdlc->qmc_chan))
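The receive-completion rework above makes the CRC handling explicit: received frames still carry a 2-byte (CRC16) or 4-byte (CRC32) FCS that must be trimmed, and anything shorter than the FCS is counted as a length error. A userspace sketch of just that length adjustment:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool strip_fcs(size_t *length, bool is_crc32)
{
	size_t crc_size = is_crc32 ? 4 : 2;

	if (*length < crc_size)
		return false;	/* undersized frame: count it as a length error */

	*length -= crc_size;
	return true;
}

int main(void)
{
	size_t len = 64;

	if (strip_fcs(&len, false))
		printf("payload length without CRC16: %zu\n", len);
	return 0;
}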
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index 876c029f58f6..9e0b9e329bda 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -473,7 +473,8 @@ static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
{
int i;
- clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
+ if (!test_and_clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
+ return;
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
diff --git a/drivers/net/wireless/ath/ath12k/wow.c b/drivers/net/wireless/ath/ath12k/wow.c
index bead19db2c9a..9b8684abbe40 100644
--- a/drivers/net/wireless/ath/ath12k/wow.c
+++ b/drivers/net/wireless/ath/ath12k/wow.c
@@ -361,7 +361,7 @@ static int ath12k_wow_vif_set_wakeups(struct ath12k_vif *arvif,
struct ath12k *ar = arvif->ar;
unsigned long wow_mask = 0;
int pattern_id = 0;
- int ret, i;
+ int ret, i, j;
/* Setup requested WOW features */
switch (arvif->vdev_type) {
@@ -431,9 +431,9 @@ static int ath12k_wow_vif_set_wakeups(struct ath12k_vif *arvif,
eth_pattern->pattern_len);
/* convert bitmask to bytemask */
- for (i = 0; i < eth_pattern->pattern_len; i++)
- if (eth_pattern->mask[i / 8] & BIT(i % 8))
- new_pattern.bytemask[i] = 0xff;
+ for (j = 0; j < eth_pattern->pattern_len; j++)
+ if (eth_pattern->mask[j / 8] & BIT(j % 8))
+ new_pattern.bytemask[j] = 0xff;
new_pattern.pattern_len = eth_pattern->pattern_len;
new_pattern.pkt_offset = eth_pattern->pkt_offset;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 2e6268cb06c0..1bab93d049df 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -303,6 +303,7 @@ mt7921_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mvif->bss_conf.mt76.omac_idx = mvif->bss_conf.mt76.idx;
mvif->phy = phy;
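+ /* Back-pointer so the bss_conf can find its owning vif */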
+ mvif->bss_conf.vif = mvif;
mvif->bss_conf.mt76.band_idx = 0;
mvif->bss_conf.mt76.wmm_idx = mvif->bss_conf.mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 9fe664960b38..e2a6575b9ff7 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -126,8 +126,6 @@ struct pn544_i2c_fw_secure_blob {
#define PN544_FW_CMD_RESULT_COMMAND_REJECTED 0xE0
#define PN544_FW_CMD_RESULT_CHUNK_ERROR 0xE6
-#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
-
#define PN544_FW_WRITE_BUFFER_MAX_LEN 0x9f7
#define PN544_FW_I2C_MAX_PAYLOAD PN544_HCI_I2C_LLC_MAX_SIZE
#define PN544_FW_I2C_WRITE_FRAME_HEADER_LEN 8
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 061f01f60db4..736ad8baa2a5 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -485,7 +485,9 @@ int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
struct pci_dev *pdev = ctrl_dev(ctrl);
pci_config_pm_runtime_get(pdev);
- pcie_write_cmd_nowait(ctrl, FIELD_PREP(PCI_EXP_SLTCTL_AIC, status),
+
+ /* Attention and Power Indicator Control bits are supported */
+ pcie_write_cmd_nowait(ctrl, FIELD_PREP(PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC, status),
PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC);
pci_config_pm_runtime_put(pdev);
return 0;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e3a49f66982d..ffaaca0978cb 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -4477,12 +4477,6 @@ void pci_intx(struct pci_dev *pdev, int enable)
{
u16 pci_command, new;
- /* Preserve the "hybrid" behavior for backwards compatibility */
- if (pci_is_managed(pdev)) {
- WARN_ON_ONCE(pcim_intx(pdev, enable) != 0);
- return;
- }
-
pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
if (enable)
@@ -4490,8 +4484,15 @@ void pci_intx(struct pci_dev *pdev, int enable)
else
new = pci_command | PCI_COMMAND_INTX_DISABLE;
- if (new != pci_command)
+ if (new != pci_command) {
+ /* Preserve the "hybrid" behavior for backwards compatibility */
+ if (pci_is_managed(pdev)) {
+ WARN_ON_ONCE(pcim_intx(pdev, enable) != 0);
+ return;
+ }
+
pci_write_config_word(pdev, PCI_COMMAND, new);
+ }
}
EXPORT_SYMBOL_GPL(pci_intx);
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 44d3951d009f..31a17a56eb3b 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -416,7 +416,7 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
* but not in the user access mode as we want to use the other counters
* that support sampling/filtering.
*/
- if (hwc->flags & PERF_EVENT_FLAG_LEGACY) {
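+ /* The legacy counter mapping only makes sense for generic hardware events */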
+ if ((hwc->flags & PERF_EVENT_FLAG_LEGACY) && (event->attr.type == PERF_TYPE_HARDWARE)) {
if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
cmask = 1;
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index f776fd42244f..73f75958e15c 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -813,9 +813,11 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
if (ret == -ENOPROTOOPT) {
dev_dbg(ec_dev->dev,
"GET_NEXT_EVENT returned invalid version error.\n");
+ mutex_lock(&ec_dev->lock);
ret = cros_ec_get_host_command_version_mask(ec_dev,
EC_CMD_GET_NEXT_EVENT,
&ver_mask);
+ mutex_unlock(&ec_dev->lock);
if (ret < 0 || ver_mask == 0)
/*
* Do not change the MKBP supported version if we can't
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 3e94fdd1ea52..3197aaa69da7 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -757,7 +757,6 @@ static union acpi_object *__call_snc_method(acpi_handle handle, char *method,
return result;
}
-#define MIN(a, b) (a > b ? b : a)
static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
void *buffer, size_t buflen)
{
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index b72f672a7720..66b1bdc63284 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -550,4 +550,5 @@ void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
put_device(&gdev->dev);
}
EXPORT_SYMBOL(ccwgroup_remove_ccwdev);
+MODULE_DESCRIPTION("ccwgroup bus driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 8ad49030a7bf..914dde041675 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -488,4 +488,5 @@ static void __exit vfio_ccw_sch_exit(void)
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);
+MODULE_DESCRIPTION("VFIO based Subchannel device driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index d31884f82f2a..73085d2f5c43 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -65,11 +65,7 @@
#include "task.h"
#include "probe_roms.h"
-#define MAJ 1
-#define MIN 2
-#define BUILD 0
-#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
- __stringify(BUILD)
+#define DRV_VERSION "1.2.0"
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 69b14918de59..ca8f132e03ae 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -3575,6 +3575,17 @@ static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc,
scmd->sc_data_direction);
priv->meta_sg_valid = 1; /* To unmap meta sg DMA */
} else {
+ /*
+ * Some firmware versions byte-swap the REPORT ZONES command
+ * reply from ATA-ZAC devices by directly accessing it in the host
+ * buffer. This does not respect the default command DMA
+ * direction and causes IOMMU page faults on some architectures
+ * with an IOMMU enforcing write mappings (e.g. AMD hosts).
+ * Avoid this issue by making the REPORT ZONES buffer mapping
+ * bi-directional.
+ */
+ if (scmd->cmnd[0] == ZBC_IN && scmd->cmnd[1] == ZI_REPORT_ZONES)
+ scmd->sc_data_direction = DMA_BIDIRECTIONAL;
sg_scmd = scsi_sglist(scmd);
sges_left = scsi_dma_map(scmd);
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index b2bcf4a27ddc..b785a7e88b49 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2671,6 +2671,22 @@ _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
_base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
}
+static inline int _base_scsi_dma_map(struct scsi_cmnd *cmd)
+{
+ /*
+ * Some firmware versions byte-swap the REPORT ZONES command reply from
+ * ATA-ZAC devices by directly accessing it in the host buffer. This does
+ * not respect the default command DMA direction and causes IOMMU page
+ * faults on some architectures with an IOMMU enforcing write mappings
+ * (e.g. AMD hosts). Avoid this issue by making the report zones buffer
+ * mapping bi-directional.
+ */
+ if (cmd->cmnd[0] == ZBC_IN && cmd->cmnd[1] == ZI_REPORT_ZONES)
+ cmd->sc_data_direction = DMA_BIDIRECTIONAL;
+
+ return scsi_dma_map(cmd);
+}
+
/**
* _base_build_sg_scmd - main sg creation routine
* pcie_device is unused here!
@@ -2717,7 +2733,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
sg_scmd = scsi_sglist(scmd);
- sges_left = scsi_dma_map(scmd);
+ sges_left = _base_scsi_dma_map(scmd);
if (sges_left < 0)
return -ENOMEM;
@@ -2861,7 +2877,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
}
sg_scmd = scsi_sglist(scmd);
- sges_left = scsi_dma_map(scmd);
+ sges_left = _base_scsi_dma_map(scmd);
if (sges_left < 0)
return -ENOMEM;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index adeaa8ab9951..8bb3a3611851 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -4205,6 +4205,8 @@ static int sd_resume(struct device *dev)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
+ sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
+
if (opal_unlock_from_suspend(sdkp->opal_dev)) {
sd_printk(KERN_NOTICE, sdkp, "OPAL unlock failed\n");
return -EIO;
@@ -4221,13 +4223,12 @@ static int sd_resume_common(struct device *dev, bool runtime)
if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
return 0;
- sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
-
if (!sd_do_start_stop(sdkp->device, runtime)) {
sdkp->suspended = false;
return 0;
}
+ sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
ret = sd_start_stop_device(sdkp, 1);
if (!ret) {
sd_resume(dev);
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index a0d2556a27bb..089653018d32 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -431,7 +431,7 @@ int sr_select_speed(struct cdrom_device_info *cdi, unsigned long speed)
struct packet_command cgc;
/* avoid exceeding the max speed or overflowing integer bounds */
- speed = clamp(0, speed, 0xffff / 177);
+ speed = clamp(speed, 0, 0xffff / 177);
if (speed == 0)
speed = 0xffff; /* set to max */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h
index 7349943bba2b..160c496784b7 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h
@@ -22,11 +22,6 @@
/* force a value to a lower even value */
#define EVEN_FLOOR(x) ((x) & ~1)
-/* for preprocessor and array sizing use MIN and MAX
- otherwise use min and max */
-#define MAX(a, b) (((a) > (b)) ? (a) : (b))
-#define MIN(a, b) (((a) < (b)) ? (a) : (b))
-
#define CEIL_DIV(a, b) (((b) != 0) ? ((a) + (b) - 1) / (b) : 0)
#define CEIL_MUL(a, b) (CEIL_DIV(a, b) * (b))
#define CEIL_MUL2(a, b) (((a) + (b) - 1) & ~((b) - 1))
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
index 114136893a59..006614921870 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
@@ -278,20 +278,32 @@ static struct thermal_zone_params tzone_params = {
static bool msi_irq;
+static void proc_thermal_free_msi(struct pci_dev *pdev, struct proc_thermal_pci *pci_info)
+{
+ int i;
+
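+ /* Only free IRQs for vectors that were actually requested */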
+ for (i = 0; i < MSI_THERMAL_MAX; i++) {
+ if (proc_thermal_msi_map[i])
+ devm_free_irq(&pdev->dev, proc_thermal_msi_map[i], pci_info);
+ }
+
+ pci_free_irq_vectors(pdev);
+}
+
static int proc_thermal_setup_msi(struct pci_dev *pdev, struct proc_thermal_pci *pci_info)
{
- int ret, i, irq;
+ int ret, i, irq, count;
- ret = pci_alloc_irq_vectors(pdev, 1, MSI_THERMAL_MAX, PCI_IRQ_MSI | PCI_IRQ_MSIX);
- if (ret < 0) {
+ count = pci_alloc_irq_vectors(pdev, 1, MSI_THERMAL_MAX, PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ if (count < 0) {
dev_err(&pdev->dev, "Failed to allocate vectors!\n");
- return ret;
+ return count;
}
dev_info(&pdev->dev, "msi enabled:%d msix enabled:%d\n", pdev->msi_enabled,
pdev->msix_enabled);
- for (i = 0; i < MSI_THERMAL_MAX; i++) {
+ for (i = 0; i < count; i++) {
irq = pci_irq_vector(pdev, i);
ret = devm_request_threaded_irq(&pdev->dev, irq, proc_thermal_irq_handler,
@@ -310,7 +322,7 @@ static int proc_thermal_setup_msi(struct pci_dev *pdev, struct proc_thermal_pci
return 0;
err_free_msi_vectors:
- pci_free_irq_vectors(pdev);
+ proc_thermal_free_msi(pdev, pci_info);
return ret;
}
@@ -397,7 +409,7 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_
err_free_vectors:
if (msi_irq)
- pci_free_irq_vectors(pdev);
+ proc_thermal_free_msi(pdev, pci_info);
err_ret_tzone:
thermal_zone_device_unregister(pci_info->tzone);
err_del_legacy:
@@ -419,6 +431,9 @@ static void proc_thermal_pci_remove(struct pci_dev *pdev)
proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, 0);
proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0);
+ if (msi_irq)
+ proc_thermal_free_msi(pdev, pci_info);
+
thermal_zone_device_unregister(pci_info->tzone);
proc_thermal_mmio_remove(pdev, pci_info->proc_priv);
if (!pci_info->no_legacy)
diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c
index c0b679b846b3..06a0554ddc38 100644
--- a/drivers/thermal/thermal_trip.c
+++ b/drivers/thermal/thermal_trip.c
@@ -88,10 +88,10 @@ void thermal_zone_set_trips(struct thermal_zone_device *tz)
return;
for_each_trip_desc(tz, td) {
- if (td->threshold < tz->temperature && td->threshold > low)
+ if (td->threshold <= tz->temperature && td->threshold > low)
low = td->threshold;
- if (td->threshold > tz->temperature && td->threshold < high)
+ if (td->threshold >= tz->temperature && td->threshold < high)
high = td->threshold;
}
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index ce36154ce963..7aea8fbaeee8 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -316,6 +316,11 @@ static inline int ufshcd_rpm_get_sync(struct ufs_hba *hba)
return pm_runtime_get_sync(&hba->ufs_device_wlun->sdev_gendev);
}
+static inline int ufshcd_rpm_get_if_active(struct ufs_hba *hba)
+{
+ return pm_runtime_get_if_active(&hba->ufs_device_wlun->sdev_gendev);
+}
+
static inline int ufshcd_rpm_put_sync(struct ufs_hba *hba)
{
return pm_runtime_put_sync(&hba->ufs_device_wlun->sdev_gendev);
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index dc757ba47522..5e3c67e96956 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -2416,7 +2416,17 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
return err;
}
+ /*
+ * The UFSHCI 3.0 specification does not define MCQ_SUPPORT and
+ * LSDB_SUPPORT, but it defines bits [31:29] as reserved with a reset
+ * value of 0, so these fields can be read regardless of the version.
+ */
hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities);
+ /*
+ * 0h: legacy single doorbell support is available
+ * 1h: legacy single doorbell support has been removed
+ */
+ hba->lsdb_sup = !FIELD_GET(MASK_LSDB_SUPPORT, hba->capabilities);
if (!hba->mcq_sup)
return 0;
@@ -6553,7 +6563,8 @@ again:
if (ufshcd_err_handling_should_stop(hba))
goto skip_err_handling;
- if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+ if ((hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) &&
+ !hba->force_reset) {
bool ret;
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -8211,7 +8222,10 @@ static void ufshcd_update_rtc(struct ufs_hba *hba)
*/
val = ts64.tv_sec - hba->dev_info.rtc_time_baseline;
- ufshcd_rpm_get_sync(hba);
+ /* Skip updating the RTC if the RPM state is not RPM_ACTIVE */
+ if (ufshcd_rpm_get_if_active(hba) <= 0)
+ return;
+
err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_SECONDS_PASSED,
0, 0, &val);
ufshcd_rpm_put_sync(hba);
@@ -10265,9 +10279,6 @@ int ufshcd_system_restore(struct device *dev)
*/
ufshcd_readl(hba, REG_UTP_TASK_REQ_LIST_BASE_H);
- /* Resuming from hibernate, assume that link was OFF */
- ufshcd_set_link_off(hba);
-
return 0;
}
@@ -10496,6 +10507,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
}
if (!is_mcq_supported(hba)) {
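+ /* Without MCQ, the controller must support legacy single doorbell mode */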
+ if (!hba->lsdb_sup) {
+ dev_err(hba->dev, "%s: failed to initialize (legacy doorbell mode not supported)\n",
+ __func__);
+ err = -EINVAL;
+ goto out_disable;
+ }
err = scsi_add_host(host, hba->dev);
if (err) {
dev_err(hba->dev, "scsi_add_host failed\n");
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index 16ad3528d80b..9ec318ef52bf 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -1293,6 +1293,9 @@ static void exynos_ufs_fmp_resume(struct ufs_hba *hba)
{
struct arm_smccc_res res;
+ if (!(hba->caps & UFSHCD_CAP_CRYPTO))
+ return;
+
arm_smccc_smc(SMC_CMD_FMP_SECURITY, 0, SMU_EMBEDDED, CFG_DESCTYPE_3,
0, 0, 0, 0, &res);
if (res.a0)
diff --git a/drivers/vdpa/octeon_ep/octep_vdpa_hw.c b/drivers/vdpa/octeon_ep/octep_vdpa_hw.c
index 7fa0491bb201..11bd76ae18cf 100644
--- a/drivers/vdpa/octeon_ep/octep_vdpa_hw.c
+++ b/drivers/vdpa/octeon_ep/octep_vdpa_hw.c
@@ -140,7 +140,7 @@ static int octep_process_mbox(struct octep_hw *oct_hw, u16 id, u16 qid, void *bu
val = octep_read_sig(mbox);
if ((val & 0xFFFF) != MBOX_RSP_SIG) {
dev_warn(&pdev->dev, "Invalid Signature from mbox : %d response\n", id);
- return ret;
+ return -EINVAL;
}
val = octep_read_sts(mbox);
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 3f7333dca508..2e093535884b 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -64,6 +64,8 @@
#include <linux/console.h>
#include <linux/string.h>
#include <linux/kd.h>
+#include <linux/panic.h>
+#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/fbcon.h>
@@ -270,12 +272,24 @@ static int fbcon_get_rotate(struct fb_info *info)
return (ops) ? ops->rotate : 0;
}
+static bool fbcon_skip_panic(struct fb_info *info)
+{
+/*
+ * panic_cpu is not exported and can't be used if fbcon is built as a
+ * module. Use oops_in_progress instead, but then a non-fatal oops won't
+ * be printed.
+ */
+#if defined(MODULE)
+ return (info->skip_panic && unlikely(oops_in_progress));
+#else
+ return (info->skip_panic && unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID));
+#endif
+}
+
static inline int fbcon_is_inactive(struct vc_data *vc, struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
return (info->state != FBINFO_STATE_RUNNING ||
- vc->vc_mode != KD_TEXT || ops->graphics);
+ vc->vc_mode != KD_TEXT || ops->graphics || fbcon_skip_panic(info));
}
static int get_color(struct vc_data *vc, struct fb_info *info,
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index a9b93e99c23a..bc1f962e483b 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -305,15 +305,9 @@ static int virtio_dev_probe(struct device *_d)
if (err)
goto err;
- if (dev->config->create_avq) {
- err = dev->config->create_avq(dev);
- if (err)
- goto err;
- }
-
err = drv->probe(dev);
if (err)
- goto err_probe;
+ goto err;
/* If probe didn't do it, mark device DRIVER_OK ourselves. */
if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
@@ -326,9 +320,6 @@ static int virtio_dev_probe(struct device *_d)
return 0;
-err_probe:
- if (dev->config->destroy_avq)
- dev->config->destroy_avq(dev);
err:
virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
return err;
@@ -344,9 +335,6 @@ static void virtio_dev_remove(struct device *_d)
drv->remove(dev);
- if (dev->config->destroy_avq)
- dev->config->destroy_avq(dev);
-
/* Driver should have reset device. */
WARN_ON_ONCE(dev->config->get_status(dev));
@@ -524,9 +512,6 @@ int virtio_device_freeze(struct virtio_device *dev)
}
}
- if (dev->config->destroy_avq)
- dev->config->destroy_avq(dev);
-
return 0;
}
EXPORT_SYMBOL_GPL(virtio_device_freeze);
@@ -562,16 +547,10 @@ int virtio_device_restore(struct virtio_device *dev)
if (ret)
goto err;
- if (dev->config->create_avq) {
- ret = dev->config->create_avq(dev);
- if (ret)
- goto err;
- }
-
if (drv->restore) {
ret = drv->restore(dev);
if (ret)
- goto err_restore;
+ goto err;
}
/* If restore didn't do it, mark device DRIVER_OK ourselves. */
@@ -582,9 +561,6 @@ int virtio_device_restore(struct virtio_device *dev)
return 0;
-err_restore:
- if (dev->config->destroy_avq)
- dev->config->destroy_avq(dev);
err:
virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
return ret;
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 7d82facafd75..c44d8ba00c02 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -46,12 +46,26 @@ bool vp_notify(struct virtqueue *vq)
return true;
}
+/* Notify all slow path virtqueues on an interrupt. */
+static void vp_vring_slow_path_interrupt(int irq,
+ struct virtio_pci_device *vp_dev)
+{
+ struct virtio_pci_vq_info *info;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_for_each_entry(info, &vp_dev->slow_virtqueues, node)
+ vring_interrupt(irq, info->vq);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+}
+
/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
struct virtio_pci_device *vp_dev = opaque;
virtio_config_changed(&vp_dev->vdev);
+ vp_vring_slow_path_interrupt(irq, vp_dev);
return IRQ_HANDLED;
}
@@ -125,6 +139,9 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
GFP_KERNEL))
goto error;
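+ /* Affinity hints only apply when each vq gets its own vector */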
+ if (!per_vq_vectors)
+ desc = NULL;
+
if (desc) {
flags |= PCI_IRQ_AFFINITY;
desc->pre_vectors++; /* virtio config vector */
@@ -171,11 +188,17 @@ error:
return err;
}
+static bool vp_is_slow_path_vector(u16 msix_vec)
+{
+ return msix_vec == VP_MSIX_CONFIG_VECTOR;
+}
+
static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name,
bool ctx,
- u16 msix_vec)
+ u16 msix_vec,
+ struct virtio_pci_vq_info **p_info)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
@@ -194,13 +217,16 @@ static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int in
info->vq = vq;
if (callback) {
spin_lock_irqsave(&vp_dev->lock, flags);
- list_add(&info->node, &vp_dev->virtqueues);
+ if (!vp_is_slow_path_vector(msix_vec))
+ list_add(&info->node, &vp_dev->virtqueues);
+ else
+ list_add(&info->node, &vp_dev->slow_virtqueues);
spin_unlock_irqrestore(&vp_dev->lock, flags);
} else {
INIT_LIST_HEAD(&info->node);
}
- vp_dev->vqs[index] = info;
+ *p_info = info;
return vq;
out_info:
@@ -236,13 +262,11 @@ void vp_del_vqs(struct virtio_device *vdev)
int i;
list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
- if (vp_dev->is_avq && vp_dev->is_avq(vdev, vq->index))
- continue;
-
if (vp_dev->per_vq_vectors) {
int v = vp_dev->vqs[vq->index]->msix_vector;
- if (v != VIRTIO_MSI_NO_VECTOR) {
+ if (v != VIRTIO_MSI_NO_VECTOR &&
+ !vp_is_slow_path_vector(v)) {
int irq = pci_irq_vector(vp_dev->pci_dev, v);
irq_update_affinity_hint(irq, NULL);
@@ -284,21 +308,85 @@ void vp_del_vqs(struct virtio_device *vdev)
vp_dev->vqs = NULL;
}
+enum vp_vq_vector_policy {
+ VP_VQ_VECTOR_POLICY_EACH,
+ VP_VQ_VECTOR_POLICY_SHARED_SLOW,
+ VP_VQ_VECTOR_POLICY_SHARED,
+};
+
+static struct virtqueue *
+vp_find_one_vq_msix(struct virtio_device *vdev, int queue_idx,
+ vq_callback_t *callback, const char *name, bool ctx,
+ bool slow_path, int *allocated_vectors,
+ enum vp_vq_vector_policy vector_policy,
+ struct virtio_pci_vq_info **p_info)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtqueue *vq;
+ u16 msix_vec;
+ int err;
+
+ if (!callback)
+ msix_vec = VIRTIO_MSI_NO_VECTOR;
+ else if (vector_policy == VP_VQ_VECTOR_POLICY_EACH ||
+ (vector_policy == VP_VQ_VECTOR_POLICY_SHARED_SLOW &&
+ !slow_path))
+ msix_vec = (*allocated_vectors)++;
+ else if (vector_policy != VP_VQ_VECTOR_POLICY_EACH &&
+ slow_path)
+ msix_vec = VP_MSIX_CONFIG_VECTOR;
+ else
+ msix_vec = VP_MSIX_VQ_VECTOR;
+ vq = vp_setup_vq(vdev, queue_idx, callback, name, ctx, msix_vec,
+ p_info);
+ if (IS_ERR(vq))
+ return vq;
+
+ if (vector_policy == VP_VQ_VECTOR_POLICY_SHARED ||
+ msix_vec == VIRTIO_MSI_NO_VECTOR ||
+ vp_is_slow_path_vector(msix_vec))
+ return vq;
+
+ /* allocate per-vq irq if available and necessary */
+ snprintf(vp_dev->msix_names[msix_vec], sizeof(*vp_dev->msix_names),
+ "%s-%s", dev_name(&vp_dev->vdev.dev), name);
+ err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
+ vring_interrupt, 0,
+ vp_dev->msix_names[msix_vec], vq);
+ if (err) {
+ vp_del_vq(vq);
+ return ERR_PTR(err);
+ }
+
+ return vq;
+}
+
static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
struct virtqueue_info vqs_info[],
- bool per_vq_vectors,
+ enum vp_vq_vector_policy vector_policy,
struct irq_affinity *desc)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_admin_vq *avq = &vp_dev->admin_vq;
struct virtqueue_info *vqi;
- u16 msix_vec;
int i, err, nvectors, allocated_vectors, queue_idx = 0;
+ struct virtqueue *vq;
+ bool per_vq_vectors;
+ u16 avq_num = 0;
vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
if (!vp_dev->vqs)
return -ENOMEM;
+ if (vp_dev->avq_index) {
+ err = vp_dev->avq_index(vdev, &avq->vq_index, &avq_num);
+ if (err)
+ goto error_find;
+ }
+
+ per_vq_vectors = vector_policy != VP_VQ_VECTOR_POLICY_SHARED;
+
if (per_vq_vectors) {
/* Best option: one for change interrupt, one per vq. */
nvectors = 1;
@@ -307,13 +395,14 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
if (vqi->name && vqi->callback)
++nvectors;
}
+ if (avq_num && vector_policy == VP_VQ_VECTOR_POLICY_EACH)
+ ++nvectors;
} else {
/* Second best: one for change, shared for all vqs. */
nvectors = 2;
}
- err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
- per_vq_vectors ? desc : NULL);
+ err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors, desc);
if (err)
goto error_find;
@@ -325,37 +414,27 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
vqs[i] = NULL;
continue;
}
-
- if (!vqi->callback)
- msix_vec = VIRTIO_MSI_NO_VECTOR;
- else if (vp_dev->per_vq_vectors)
- msix_vec = allocated_vectors++;
- else
- msix_vec = VP_MSIX_VQ_VECTOR;
- vqs[i] = vp_setup_vq(vdev, queue_idx++, vqi->callback,
- vqi->name, vqi->ctx, msix_vec);
+ vqs[i] = vp_find_one_vq_msix(vdev, queue_idx++, vqi->callback,
+ vqi->name, vqi->ctx, false,
+ &allocated_vectors, vector_policy,
+ &vp_dev->vqs[i]);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
goto error_find;
}
+ }
- if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
- continue;
-
- /* allocate per-vq irq if available and necessary */
- snprintf(vp_dev->msix_names[msix_vec],
- sizeof *vp_dev->msix_names,
- "%s-%s",
- dev_name(&vp_dev->vdev.dev), vqi->name);
- err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
- vring_interrupt, 0,
- vp_dev->msix_names[msix_vec],
- vqs[i]);
- if (err) {
- vp_del_vq(vqs[i]);
- goto error_find;
- }
+ if (!avq_num)
+ return 0;
+ sprintf(avq->name, "avq.%u", avq->vq_index);
+ vq = vp_find_one_vq_msix(vdev, avq->vq_index, vp_modern_avq_done,
+ avq->name, false, true, &allocated_vectors,
+ vector_policy, &vp_dev->admin_vq.info);
+ if (IS_ERR(vq)) {
+ err = PTR_ERR(vq);
+ goto error_find;
}
+
return 0;
error_find:
@@ -368,12 +447,21 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue_info vqs_info[])
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_admin_vq *avq = &vp_dev->admin_vq;
int i, err, queue_idx = 0;
+ struct virtqueue *vq;
+ u16 avq_num = 0;
vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
if (!vp_dev->vqs)
return -ENOMEM;
+ if (vp_dev->avq_index) {
+ err = vp_dev->avq_index(vdev, &avq->vq_index, &avq_num);
+ if (err)
+ goto out_del_vqs;
+ }
+
err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
dev_name(&vdev->dev), vp_dev);
if (err)
@@ -390,13 +478,24 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
}
vqs[i] = vp_setup_vq(vdev, queue_idx++, vqi->callback,
vqi->name, vqi->ctx,
- VIRTIO_MSI_NO_VECTOR);
+ VIRTIO_MSI_NO_VECTOR, &vp_dev->vqs[i]);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
goto out_del_vqs;
}
}
+ if (!avq_num)
+ return 0;
+ sprintf(avq->name, "avq.%u", avq->vq_index);
+ vq = vp_setup_vq(vdev, queue_idx++, vp_modern_avq_done, avq->name,
+ false, VIRTIO_MSI_NO_VECTOR,
+ &vp_dev->admin_vq.info);
+ if (IS_ERR(vq)) {
+ err = PTR_ERR(vq);
+ goto out_del_vqs;
+ }
+
return 0;
out_del_vqs:
vp_del_vqs(vdev);
@@ -411,11 +510,20 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
int err;
/* Try MSI-X with one vector per queue. */
- err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info, true, desc);
+ err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
+ VP_VQ_VECTOR_POLICY_EACH, desc);
+ if (!err)
+ return 0;
+ /* Fallback: MSI-X with one shared vector for config and
+ * slow path queues, one vector per queue for the rest.
+ */
+ err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
+ VP_VQ_VECTOR_POLICY_SHARED_SLOW, desc);
if (!err)
return 0;
/* Fallback: MSI-X with one vector for config, one shared for queues. */
- err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info, false, desc);
+ err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
+ VP_VQ_VECTOR_POLICY_SHARED, desc);
if (!err)
return 0;
/* Is there an interrupt? If not give up. */
@@ -466,7 +574,8 @@ const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
if (!vp_dev->per_vq_vectors ||
- vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
+ vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR ||
+ vp_is_slow_path_vector(vp_dev->vqs[index]->msix_vector))
return NULL;
return pci_irq_get_affinity(vp_dev->pci_dev,
@@ -574,6 +683,7 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
vp_dev->vdev.dev.release = virtio_pci_release_dev;
vp_dev->pci_dev = pci_dev;
INIT_LIST_HEAD(&vp_dev->virtqueues);
+ INIT_LIST_HEAD(&vp_dev->slow_virtqueues);
spin_lock_init(&vp_dev->lock);
/* enable the device */
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index 3c4bb2d6163a..1d9c49947f52 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -35,7 +35,7 @@ struct virtio_pci_vq_info {
/* the actual virtqueue */
struct virtqueue *vq;
- /* the list node for the virtqueues list */
+ /* the list node for the virtqueues or slow_virtqueues list */
struct list_head node;
/* MSI-X vector (or none) */
@@ -44,9 +44,9 @@ struct virtio_pci_vq_info {
struct virtio_pci_admin_vq {
/* Virtqueue info associated with this admin queue. */
- struct virtio_pci_vq_info info;
- /* serializing admin commands execution and virtqueue deletion */
- struct mutex cmd_lock;
+ struct virtio_pci_vq_info *info;
+ /* Protects virtqueue access. */
+ spinlock_t lock;
u64 supported_cmds;
/* Name of the admin queue: avq.$vq_index. */
char name[10];
@@ -66,9 +66,12 @@ struct virtio_pci_device {
/* Where to read and clear interrupt */
u8 __iomem *isr;
- /* a list of queues so we can dispatch IRQs */
+ /* Lists of regular and slow path queues so we can
+ * dispatch IRQs.
+ */
spinlock_t lock;
struct list_head virtqueues;
+ struct list_head slow_virtqueues;
/* Array of all virtqueues reported in the
* PCI common config num_queues field
@@ -102,7 +105,7 @@ struct virtio_pci_device {
void (*del_vq)(struct virtio_pci_vq_info *info);
u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
- bool (*is_avq)(struct virtio_device *vdev, unsigned int index);
+ int (*avq_index)(struct virtio_device *vdev, u16 *index, u16 *num);
};
/* Constants for MSI-X */
@@ -175,6 +178,7 @@ struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev);
#define VIRTIO_ADMIN_CMD_BITMAP 0
#endif
+void vp_modern_avq_done(struct virtqueue *vq);
int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
struct virtio_admin_cmd *cmd);
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 3b5b9499a53a..9193c30d640a 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -28,6 +28,21 @@ static u64 vp_get_features(struct virtio_device *vdev)
return vp_modern_get_features(&vp_dev->mdev);
}
+static int vp_avq_index(struct virtio_device *vdev, u16 *index, u16 *num)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ *num = 0;
+ if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+ return 0;
+
+ *num = vp_modern_avq_num(&vp_dev->mdev);
+ if (!(*num))
+ return -EINVAL;
+ *index = vp_modern_avq_index(&vp_dev->mdev);
+ return 0;
+}
+
static bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
@@ -38,17 +53,35 @@ static bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
return index == vp_dev->admin_vq.vq_index;
}
+void vp_modern_avq_done(struct virtqueue *vq)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
+ struct virtio_admin_cmd *cmd;
+ unsigned long flags;
+ unsigned int len;
+
+ spin_lock_irqsave(&admin_vq->lock, flags);
+ do {
+ virtqueue_disable_cb(vq);
+ while ((cmd = virtqueue_get_buf(vq, &len)))
+ complete(&cmd->completion);
+ } while (!virtqueue_enable_cb(vq));
+ spin_unlock_irqrestore(&admin_vq->lock, flags);
+}
+
static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
u16 opcode,
struct scatterlist **sgs,
unsigned int out_num,
unsigned int in_num,
- void *data)
+ struct virtio_admin_cmd *cmd)
{
struct virtqueue *vq;
- int ret, len;
+ unsigned long flags;
+ int ret;
- vq = admin_vq->info.vq;
+ vq = admin_vq->info->vq;
if (!vq)
return -EIO;
@@ -57,21 +90,33 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
!((1ULL << opcode) & admin_vq->supported_cmds))
return -EOPNOTSUPP;
- ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, data, GFP_KERNEL);
- if (ret < 0)
- return -EIO;
+ init_completion(&cmd->completion);
- if (unlikely(!virtqueue_kick(vq)))
+again:
+ if (virtqueue_is_broken(vq))
return -EIO;
- while (!virtqueue_get_buf(vq, &len) &&
- !virtqueue_is_broken(vq))
- cpu_relax();
+ spin_lock_irqsave(&admin_vq->lock, flags);
+ ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
+ if (ret < 0) {
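+ /* Ring full: drop the lock, let the device drain and retry */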
+ if (ret == -ENOSPC) {
+ spin_unlock_irqrestore(&admin_vq->lock, flags);
+ cpu_relax();
+ goto again;
+ }
+ goto unlock_err;
+ }
+ if (!virtqueue_kick(vq))
+ goto unlock_err;
+ spin_unlock_irqrestore(&admin_vq->lock, flags);
- if (virtqueue_is_broken(vq))
- return -EIO;
+ wait_for_completion(&cmd->completion);
- return 0;
+ return cmd->ret;
+
+unlock_err:
+ spin_unlock_irqrestore(&admin_vq->lock, flags);
+ return -EIO;
}
int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
@@ -122,12 +167,9 @@ int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
in_num++;
}
- mutex_lock(&vp_dev->admin_vq.cmd_lock);
ret = virtqueue_exec_admin_cmd(&vp_dev->admin_vq,
le16_to_cpu(cmd->opcode),
- sgs, out_num, in_num, sgs);
- mutex_unlock(&vp_dev->admin_vq.cmd_lock);
-
+ sgs, out_num, in_num, cmd);
if (ret) {
dev_err(&vdev->dev,
"Failed to execute command on admin vq: %d\n.", ret);
@@ -188,25 +230,29 @@ end:
static void vp_modern_avq_activate(struct virtio_device *vdev)
{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
-
if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
return;
- __virtqueue_unbreak(admin_vq->info.vq);
virtio_pci_admin_cmd_list_init(vdev);
}
-static void vp_modern_avq_deactivate(struct virtio_device *vdev)
+static void vp_modern_avq_cleanup(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
+ struct virtio_admin_cmd *cmd;
+ struct virtqueue *vq;
if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
return;
- __virtqueue_break(admin_vq->info.vq);
+ vq = vp_dev->vqs[vp_dev->admin_vq.vq_index]->vq;
+ if (!vq)
+ return;
+
+ while ((cmd = virtqueue_detach_unused_buf(vq))) {
+ cmd->ret = -EIO;
+ complete(&cmd->completion);
+ }
}
static void vp_transport_features(struct virtio_device *vdev, u64 features)
@@ -403,7 +449,7 @@ static void vp_reset(struct virtio_device *vdev)
while (vp_modern_get_status(mdev))
msleep(1);
- vp_modern_avq_deactivate(vdev);
+ vp_modern_avq_cleanup(vdev);
/* Flush pending VQ/configuration callbacks. */
vp_synchronize_vectors(vdev);
@@ -552,8 +598,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
if (index >= vp_modern_get_num_queues(mdev) && !is_avq)
return ERR_PTR(-EINVAL);
- num = is_avq ?
- VIRTIO_AVQ_SGS_MAX : vp_modern_get_queue_size(mdev, index);
+ num = vp_modern_get_queue_size(mdev, index);
/* Check if queue is either not available or already active. */
if (!num || vp_modern_get_queue_enable(mdev, index))
return ERR_PTR(-ENOENT);
@@ -580,12 +625,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
goto err;
}
- if (is_avq) {
- mutex_lock(&vp_dev->admin_vq.cmd_lock);
- vp_dev->admin_vq.info.vq = vq;
- mutex_unlock(&vp_dev->admin_vq.cmd_lock);
- }
-
return vq;
err:
@@ -620,12 +659,6 @@ static void del_vq(struct virtio_pci_vq_info *info)
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
- if (vp_is_avq(&vp_dev->vdev, vq->index)) {
- mutex_lock(&vp_dev->admin_vq.cmd_lock);
- vp_dev->admin_vq.info.vq = NULL;
- mutex_unlock(&vp_dev->admin_vq.cmd_lock);
- }
-
if (vp_dev->msix_enabled)
vp_modern_queue_vector(mdev, vq->index,
VIRTIO_MSI_NO_VECTOR);
@@ -735,45 +768,6 @@ static bool vp_get_shm_region(struct virtio_device *vdev,
return true;
}
-static int vp_modern_create_avq(struct virtio_device *vdev)
-{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtio_pci_admin_vq *avq;
- struct virtqueue *vq;
- u16 admin_q_num;
-
- if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
- return 0;
-
- admin_q_num = vp_modern_avq_num(&vp_dev->mdev);
- if (!admin_q_num)
- return -EINVAL;
-
- avq = &vp_dev->admin_vq;
- avq->vq_index = vp_modern_avq_index(&vp_dev->mdev);
- sprintf(avq->name, "avq.%u", avq->vq_index);
- vq = vp_dev->setup_vq(vp_dev, &vp_dev->admin_vq.info, avq->vq_index, NULL,
- avq->name, NULL, VIRTIO_MSI_NO_VECTOR);
- if (IS_ERR(vq)) {
- dev_err(&vdev->dev, "failed to setup admin virtqueue, err=%ld",
- PTR_ERR(vq));
- return PTR_ERR(vq);
- }
-
- vp_modern_set_queue_enable(&vp_dev->mdev, avq->info.vq->index, true);
- return 0;
-}
-
-static void vp_modern_destroy_avq(struct virtio_device *vdev)
-{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-
- if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
- return;
-
- vp_dev->del_vq(&vp_dev->admin_vq.info);
-}
-
static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
.get = NULL,
.set = NULL,
@@ -792,8 +786,6 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
.get_shm_region = vp_get_shm_region,
.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
- .create_avq = vp_modern_create_avq,
- .destroy_avq = vp_modern_destroy_avq,
};
static const struct virtio_config_ops virtio_pci_config_ops = {
@@ -814,8 +806,6 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
.get_shm_region = vp_get_shm_region,
.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
- .create_avq = vp_modern_create_avq,
- .destroy_avq = vp_modern_destroy_avq,
};
/* the PCI probing function */
@@ -839,11 +829,11 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
vp_dev->config_vector = vp_config_vector;
vp_dev->setup_vq = setup_vq;
vp_dev->del_vq = del_vq;
- vp_dev->is_avq = vp_is_avq;
+ vp_dev->avq_index = vp_avq_index;
vp_dev->isr = mdev->isr;
vp_dev->vdev.id = mdev->id;
- mutex_init(&vp_dev->admin_vq.cmd_lock);
+ spin_lock_init(&vp_dev->admin_vq.lock);
return 0;
}
@@ -851,6 +841,5 @@ void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
{
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
- mutex_destroy(&vp_dev->admin_vq.cmd_lock);
vp_modern_remove(mdev);
}
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 498442d0c216..2e49d978f504 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1223,8 +1223,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
block_group->space_info->total_bytes -= block_group->length;
block_group->space_info->bytes_readonly -=
(block_group->length - block_group->zone_unusable);
- block_group->space_info->bytes_zone_unusable -=
- block_group->zone_unusable;
+ btrfs_space_info_update_bytes_zone_unusable(fs_info, block_group->space_info,
+ -block_group->zone_unusable);
block_group->space_info->disk_total -= block_group->length * factor;
spin_unlock(&block_group->space_info->lock);
@@ -1396,7 +1396,8 @@ static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
if (btrfs_is_zoned(cache->fs_info)) {
/* Migrate zone_unusable bytes to readonly */
sinfo->bytes_readonly += cache->zone_unusable;
- sinfo->bytes_zone_unusable -= cache->zone_unusable;
+ btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo,
+ -cache->zone_unusable);
cache->zone_unusable = 0;
}
cache->ro++;
@@ -3056,9 +3057,11 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
if (btrfs_is_zoned(cache->fs_info)) {
/* Migrate zone_unusable bytes back */
cache->zone_unusable =
- (cache->alloc_offset - cache->used) +
+ (cache->alloc_offset - cache->used - cache->pinned -
+ cache->reserved) +
(cache->length - cache->zone_capacity);
- sinfo->bytes_zone_unusable += cache->zone_unusable;
+ btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo,
+ cache->zone_unusable);
sinfo->bytes_readonly -= cache->zone_unusable;
}
num_bytes = cache->length - cache->reserved -
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index c8568b1a61c4..75fa563e4cac 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -459,6 +459,7 @@ struct btrfs_file_private {
void *filldir_buf;
u64 last_index;
struct extent_state *llseek_cached_state;
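+ /* True when the caller of fsync already holds the inode lock */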
+ bool fsync_skip_inode_lock;
};
static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
diff --git a/fs/btrfs/direct-io.c b/fs/btrfs/direct-io.c
index f9fb2db6a1e4..67adbe9d294a 100644
--- a/fs/btrfs/direct-io.c
+++ b/fs/btrfs/direct-io.c
@@ -856,21 +856,37 @@ relock:
* So here we disable page faults in the iov_iter and then retry if we
* got -EFAULT, faulting in the pages before the retry.
*/
+again:
from->nofault = true;
dio = btrfs_dio_write(iocb, from, written);
from->nofault = false;
- /*
- * iomap_dio_complete() will call btrfs_sync_file() if we have a dsync
- * iocb, and that needs to lock the inode. So unlock it before calling
- * iomap_dio_complete() to avoid a deadlock.
- */
- btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
-
- if (IS_ERR_OR_NULL(dio))
+ if (IS_ERR_OR_NULL(dio)) {
ret = PTR_ERR_OR_ZERO(dio);
- else
+ } else {
+ struct btrfs_file_private stack_private = { 0 };
+ struct btrfs_file_private *private;
+ const bool have_private = (file->private_data != NULL);
+
+ if (!have_private)
+ file->private_data = &stack_private;
+
+ /*
+ * If we have a synchronous write, we must make sure the fsync
+ * triggered by the iomap_dio_complete() call below doesn't
+ * deadlock on the inode lock: we are already holding it, and we
+ * can't call it after unlocking because we may still need to
+ * complete partial writes when parts of the input buffer have not
+ * been faulted in yet.
+ */
+ private = file->private_data;
+ private->fsync_skip_inode_lock = true;
ret = iomap_dio_complete(dio);
+ private->fsync_skip_inode_lock = false;
+
+ if (!have_private)
+ file->private_data = NULL;
+ }
/* No increment (+=) because iomap returns a cumulative value. */
if (ret > 0)
@@ -897,10 +913,12 @@ relock:
} else {
fault_in_iov_iter_readable(from, left);
prev_left = left;
- goto relock;
+ goto again;
}
}
+ btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
+
/*
* If 'ret' is -ENOTBLK or we have not written all data, then it means
* we must fallback to buffered IO.
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index d77498e7671c..ff9f0d41987e 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2793,7 +2793,8 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
readonly = true;
} else if (btrfs_is_zoned(fs_info)) {
/* Need reset before reusing in a zoned block group */
- space_info->bytes_zone_unusable += len;
+ btrfs_space_info_update_bytes_zone_unusable(fs_info, space_info,
+ len);
readonly = true;
}
spin_unlock(&cache->lock);
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 81558f90ee80..23b65dc73c00 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -664,7 +664,7 @@ static noinline int merge_extent_mapping(struct btrfs_inode *inode,
start_diff = start - em->start;
em->start = start;
em->len = end - start;
- if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE && !extent_map_is_compressed(em))
+ if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
em->offset += start_diff;
return add_extent_mapping(inode, em, 0);
}
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 21381de906f6..9f10a9f23fcc 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1603,6 +1603,7 @@ static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
*/
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
+ struct btrfs_file_private *private = file->private_data;
struct dentry *dentry = file_dentry(file);
struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
struct btrfs_root *root = inode->root;
@@ -1612,6 +1613,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
int ret = 0, err;
u64 len;
bool full_sync;
+ const bool skip_ilock = (private ? private->fsync_skip_inode_lock : false);
trace_btrfs_sync_file(file, datasync);
@@ -1639,7 +1641,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
if (ret)
goto out;
- btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
+ if (skip_ilock)
+ down_write(&inode->i_mmap_lock);
+ else
+ btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
atomic_inc(&root->log_batch);
@@ -1663,7 +1668,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
*/
ret = start_ordered_ops(inode, start, end);
if (ret) {
- btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
+ if (skip_ilock)
+ up_write(&inode->i_mmap_lock);
+ else
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
goto out;
}
@@ -1788,7 +1796,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* file again, but that will end up using the synchronization
* inside btrfs_sync_log to keep things safe.
*/
- btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
+ if (skip_ilock)
+ up_write(&inode->i_mmap_lock);
+ else
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
if (ret == BTRFS_NO_LOG_SYNC) {
ret = btrfs_end_transaction(trans);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 3f9b7507543a..f5996a43db24 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -2723,8 +2723,10 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
* If the block group is read-only, we should account freed space into
* bytes_readonly.
*/
- if (!block_group->ro)
+ if (!block_group->ro) {
block_group->zone_unusable += to_unusable;
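+ /* zone_unusable must never exceed the block group length */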
+ WARN_ON(block_group->zone_unusable > block_group->length);
+ }
spin_unlock(&ctl->tree_lock);
if (!used) {
spin_lock(&block_group->lock);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 01eab6955647..19d05a4c5c33 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -714,8 +714,9 @@ out:
return ret;
}
-static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 offset,
- u64 end,
+static noinline int cow_file_range_inline(struct btrfs_inode *inode,
+ struct page *locked_page,
+ u64 offset, u64 end,
size_t compressed_size,
int compress_type,
struct folio *compressed_folio,
@@ -739,7 +740,10 @@ static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 offset,
return ret;
}
- extent_clear_unlock_delalloc(inode, offset, end, NULL, &cached,
+ if (ret == 0)
+ locked_page = NULL;
+
+ extent_clear_unlock_delalloc(inode, offset, end, locked_page, &cached,
clear_flags,
PAGE_UNLOCK | PAGE_START_WRITEBACK |
PAGE_END_WRITEBACK);
@@ -1043,10 +1047,10 @@ again:
* extent for the subpage case.
*/
if (total_in < actual_end)
- ret = cow_file_range_inline(inode, start, end, 0,
+ ret = cow_file_range_inline(inode, NULL, start, end, 0,
BTRFS_COMPRESS_NONE, NULL, false);
else
- ret = cow_file_range_inline(inode, start, end, total_compressed,
+ ret = cow_file_range_inline(inode, NULL, start, end, total_compressed,
compress_type, folios[0], false);
if (ret <= 0) {
if (ret < 0)
@@ -1359,7 +1363,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
if (!no_inline) {
/* lets try to make an inline extent */
- ret = cow_file_range_inline(inode, start, end, 0,
+ ret = cow_file_range_inline(inode, locked_page, start, end, 0,
BTRFS_COMPRESS_NONE, NULL, false);
if (ret <= 0) {
/*
@@ -5660,7 +5664,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
struct inode *inode;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_root *sub_root = root;
- struct btrfs_key location;
+ struct btrfs_key location = { 0 };
u8 di_type = 0;
int ret = 0;
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 9ac94d3119e8..68e14fd48638 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -316,7 +316,7 @@ void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
found->bytes_used += block_group->used;
found->disk_used += block_group->used * factor;
found->bytes_readonly += block_group->bytes_super;
- found->bytes_zone_unusable += block_group->zone_unusable;
+ btrfs_space_info_update_bytes_zone_unusable(info, found, block_group->zone_unusable);
if (block_group->length > 0)
found->full = 0;
btrfs_try_granting_tickets(info, found);
@@ -583,8 +583,7 @@ again:
spin_lock(&cache->lock);
avail = cache->length - cache->used - cache->pinned -
- cache->reserved - cache->delalloc_bytes -
- cache->bytes_super - cache->zone_unusable;
+ cache->reserved - cache->bytes_super - cache->zone_unusable;
btrfs_info(fs_info,
"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu delalloc %llu super %llu zone_unusable (%llu bytes available) %s",
cache->start, cache->length, cache->used, cache->pinned,
diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h
index 4db8a0267c16..88b44221ce97 100644
--- a/fs/btrfs/space-info.h
+++ b/fs/btrfs/space-info.h
@@ -249,6 +249,7 @@ btrfs_space_info_update_##name(struct btrfs_fs_info *fs_info, \
DECLARE_SPACE_INFO_UPDATE(bytes_may_use, "space_info");
DECLARE_SPACE_INFO_UPDATE(bytes_pinned, "pinned");
+DECLARE_SPACE_INFO_UPDATE(bytes_zone_unusable, "zone_unusable");
int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
diff --git a/fs/btrfs/tests/extent-map-tests.c b/fs/btrfs/tests/extent-map-tests.c
index ebec4ab361b8..56e61ac1cc64 100644
--- a/fs/btrfs/tests/extent-map-tests.c
+++ b/fs/btrfs/tests/extent-map-tests.c
@@ -900,6 +900,102 @@ out:
return ret;
}
+/*
+ * Test a regression for compressed extent map adjustment when we attempt to
+ * add an extent map that is partially overlapped by another existing extent
+ * map. The resulting extent map offset was left unchanged despite having
+ * incremented its start offset.
+ */
+static int test_case_8(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
+{
+ struct extent_map_tree *em_tree = &inode->extent_tree;
+ struct extent_map *em;
+ int ret;
+ int ret2;
+
+ em = alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ return -ENOMEM;
+ }
+
+ /* Compressed extent for the file range [120K, 128K). */
+ em->start = SZ_1K * 120;
+ em->len = SZ_8K;
+ em->disk_num_bytes = SZ_4K;
+ em->ram_bytes = SZ_8K;
+ em->flags |= EXTENT_FLAG_COMPRESS_ZLIB;
+ write_lock(&em_tree->lock);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
+ write_unlock(&em_tree->lock);
+ free_extent_map(em);
+ if (ret < 0) {
+ test_err("couldn't add extent map for range [120K, 128K)");
+ goto out;
+ }
+
+ em = alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Compressed extent for the file range [108K, 144K), which overlaps
+ * with the [120K, 128K) we previously inserted.
+ */
+ em->start = SZ_1K * 108;
+ em->len = SZ_1K * 36;
+ em->disk_num_bytes = SZ_4K;
+ em->ram_bytes = SZ_1K * 36;
+ em->flags |= EXTENT_FLAG_COMPRESS_ZLIB;
+
+ /*
+ * Try to add the extent map but with a search range of [140K, 144K),
+ * this should succeed and adjust the extent map to the range
+ * [128K, 144K), with a length of 16K and an offset of 20K.
+ *
+ * This simulates a scenario where in the subvolume tree of an inode we
+ * have a compressed file extent item for the range [108K, 144K) and we
+ * have an overlapping compressed extent map for the range [120K, 128K),
+ * which was created by an encoded write, but its ordered extent was not
+ * yet completed, so the subvolume tree doesn't have yet the file extent
+ * item for that range - we only have the extent map in the inode's
+ * extent map tree.
+ */
+ write_lock(&em_tree->lock);
+ ret = btrfs_add_extent_mapping(inode, &em, SZ_1K * 140, SZ_4K);
+ write_unlock(&em_tree->lock);
+ free_extent_map(em);
+ if (ret < 0) {
+ test_err("couldn't add extent map for range [108K, 144K)");
+ goto out;
+ }
+
+ if (em->start != SZ_128K) {
+ test_err("unexpected extent map start %llu (should be 128K)", em->start);
+ ret = -EINVAL;
+ goto out;
+ }
+ if (em->len != SZ_16K) {
+ test_err("unexpected extent map length %llu (should be 16K)", em->len);
+ ret = -EINVAL;
+ goto out;
+ }
+ if (em->offset != SZ_1K * 20) {
+ test_err("unexpected extent map offset %llu (should be 20K)", em->offset);
+ ret = -EINVAL;
+ goto out;
+ }
+out:
+ ret2 = free_extent_map_tree(inode);
+ if (ret == 0)
+ ret = ret2;
+
+ return ret;
+}
+
struct rmap_test_vector {
u64 raid_type;
u64 physical_start;
@@ -1078,6 +1174,9 @@ int btrfs_test_extent_map(void)
ret = test_case_7(fs_info, BTRFS_I(inode));
if (ret)
goto out;
+ ret = test_case_8(fs_info, BTRFS_I(inode));
+ if (ret)
+ goto out;
test_msg("running rmap tests");
for (i = 0; i < ARRAY_SIZE(rmap_tests); i++) {
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 6388786fd8b5..a825fa598e3c 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -634,7 +634,7 @@ static int check_dir_item(struct extent_buffer *leaf,
*/
if (key->type == BTRFS_DIR_ITEM_KEY ||
key->type == BTRFS_XATTR_ITEM_KEY) {
- char namebuf[max(BTRFS_NAME_LEN, XATTR_NAME_MAX)];
+ char namebuf[MAX(BTRFS_NAME_LEN, XATTR_NAME_MAX)];
read_extent_buffer(leaf, namebuf,
(unsigned long)(di + 1), name_len);
@@ -1289,6 +1289,19 @@ static void extent_err(const struct extent_buffer *eb, int slot,
va_end(args);
}
+static bool is_valid_dref_root(u64 rootid)
+{
+ /*
+ * The following tree root objectids are allowed to have a data backref:
+ * - subvolume trees
+ * - data reloc tree
+ * - tree root (for the v1 space cache)
+ */
+ return is_fstree(rootid) || rootid == BTRFS_DATA_RELOC_TREE_OBJECTID ||
+ rootid == BTRFS_ROOT_TREE_OBJECTID;
+}
+
static int check_extent_item(struct extent_buffer *leaf,
struct btrfs_key *key, int slot,
struct btrfs_key *prev_key)
@@ -1441,6 +1454,8 @@ static int check_extent_item(struct extent_buffer *leaf,
struct btrfs_extent_data_ref *dref;
struct btrfs_shared_data_ref *sref;
u64 seq;
+ u64 dref_root;
+ u64 dref_objectid;
u64 dref_offset;
u64 inline_offset;
u8 inline_type;
@@ -1484,11 +1499,26 @@ static int check_extent_item(struct extent_buffer *leaf,
*/
case BTRFS_EXTENT_DATA_REF_KEY:
dref = (struct btrfs_extent_data_ref *)(&iref->offset);
+ dref_root = btrfs_extent_data_ref_root(leaf, dref);
+ dref_objectid = btrfs_extent_data_ref_objectid(leaf, dref);
dref_offset = btrfs_extent_data_ref_offset(leaf, dref);
seq = hash_extent_data_ref(
btrfs_extent_data_ref_root(leaf, dref),
btrfs_extent_data_ref_objectid(leaf, dref),
btrfs_extent_data_ref_offset(leaf, dref));
+ if (unlikely(!is_valid_dref_root(dref_root))) {
+ extent_err(leaf, slot,
+ "invalid data ref root value %llu",
+ dref_root);
+ return -EUCLEAN;
+ }
+ if (unlikely(dref_objectid < BTRFS_FIRST_FREE_OBJECTID ||
+ dref_objectid > BTRFS_LAST_FREE_OBJECTID)) {
+ extent_err(leaf, slot,
+ "invalid data ref objectid value %llu",
+ dref_objectid);
+ return -EUCLEAN;
+ }
if (unlikely(!IS_ALIGNED(dref_offset,
fs_info->sectorsize))) {
extent_err(leaf, slot,
@@ -1627,6 +1657,8 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
return -EUCLEAN;
}
for (; ptr < end; ptr += sizeof(*dref)) {
+ u64 root;
+ u64 objectid;
u64 offset;
/*
@@ -1634,7 +1666,22 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
* overflow from the leaf due to hash collisions.
*/
dref = (struct btrfs_extent_data_ref *)ptr;
+ root = btrfs_extent_data_ref_root(leaf, dref);
+ objectid = btrfs_extent_data_ref_objectid(leaf, dref);
offset = btrfs_extent_data_ref_offset(leaf, dref);
+ if (unlikely(!is_valid_dref_root(root))) {
+ extent_err(leaf, slot,
+ "invalid extent data backref root value %llu",
+ root);
+ return -EUCLEAN;
+ }
+ if (unlikely(objectid < BTRFS_FIRST_FREE_OBJECTID ||
+ objectid > BTRFS_LAST_FREE_OBJECTID)) {
+ extent_err(leaf, slot,
+ "invalid extent data backref objectid value %llu",
+				   objectid);
+ return -EUCLEAN;
+ }
if (unlikely(!IS_ALIGNED(offset, leaf->fs_info->sectorsize))) {
extent_err(leaf, slot,
"invalid extent data backref offset, have %llu expect aligned to %u",
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index e98aa8219303..808c9c048276 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2016,6 +2016,8 @@ bool __ceph_should_report_size(struct ceph_inode_info *ci)
* CHECK_CAPS_AUTHONLY - we should only check the auth cap
* CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
* further delay.
+ * CHECK_CAPS_FLUSH_FORCE - we should flush any caps immediately, without
+ * further delay.
*/
void ceph_check_caps(struct ceph_inode_info *ci, int flags)
{
@@ -2097,7 +2099,7 @@ retry:
}
doutc(cl, "%p %llx.%llx file_want %s used %s dirty %s "
- "flushing %s issued %s revoking %s retain %s %s%s%s\n",
+ "flushing %s issued %s revoking %s retain %s %s%s%s%s\n",
inode, ceph_vinop(inode), ceph_cap_string(file_wanted),
ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
ceph_cap_string(ci->i_flushing_caps),
@@ -2105,7 +2107,8 @@ retry:
ceph_cap_string(retain),
(flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
(flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "",
- (flags & CHECK_CAPS_NOINVAL) ? " NOINVAL" : "");
+ (flags & CHECK_CAPS_NOINVAL) ? " NOINVAL" : "",
+ (flags & CHECK_CAPS_FLUSH_FORCE) ? " FLUSH_FORCE" : "");
/*
* If we no longer need to hold onto old our caps, and we may
@@ -2180,6 +2183,11 @@ retry:
queue_writeback = true;
}
+ if (flags & CHECK_CAPS_FLUSH_FORCE) {
+ doutc(cl, "force to flush caps\n");
+ goto ack;
+ }
+
if (cap == ci->i_auth_cap &&
(cap->issued & CEPH_CAP_FILE_WR)) {
/* request larger max_size from MDS? */
@@ -3510,6 +3518,8 @@ static void handle_cap_grant(struct inode *inode,
bool queue_invalidate = false;
bool deleted_inode = false;
bool fill_inline = false;
+ bool revoke_wait = false;
+ int flags = 0;
/*
* If there is at least one crypto block then we'll trust
@@ -3705,16 +3715,18 @@ static void handle_cap_grant(struct inode *inode,
ceph_cap_string(cap->issued), ceph_cap_string(newcaps),
ceph_cap_string(revoking));
if (S_ISREG(inode->i_mode) &&
- (revoking & used & CEPH_CAP_FILE_BUFFER))
+ (revoking & used & CEPH_CAP_FILE_BUFFER)) {
writeback = true; /* initiate writeback; will delay ack */
- else if (queue_invalidate &&
+ revoke_wait = true;
+ } else if (queue_invalidate &&
revoking == CEPH_CAP_FILE_CACHE &&
- (newcaps & CEPH_CAP_FILE_LAZYIO) == 0)
- ; /* do nothing yet, invalidation will be queued */
- else if (cap == ci->i_auth_cap)
+ (newcaps & CEPH_CAP_FILE_LAZYIO) == 0) {
+ revoke_wait = true; /* do nothing yet, invalidation will be queued */
+ } else if (cap == ci->i_auth_cap) {
check_caps = 1; /* check auth cap only */
- else
+ } else {
check_caps = 2; /* check all caps */
+ }
/* If there is new caps, try to wake up the waiters */
if (~cap->issued & newcaps)
wake = true;
@@ -3741,8 +3753,9 @@ static void handle_cap_grant(struct inode *inode,
BUG_ON(cap->issued & ~cap->implemented);
/* don't let check_caps skip sending a response to MDS for revoke msgs */
- if (le32_to_cpu(grant->op) == CEPH_CAP_OP_REVOKE) {
+ if (!revoke_wait && le32_to_cpu(grant->op) == CEPH_CAP_OP_REVOKE) {
cap->mds_wanted = 0;
+ flags |= CHECK_CAPS_FLUSH_FORCE;
if (cap == ci->i_auth_cap)
check_caps = 1; /* check auth cap only */
else
@@ -3798,9 +3811,9 @@ static void handle_cap_grant(struct inode *inode,
mutex_unlock(&session->s_mutex);
if (check_caps == 1)
- ceph_check_caps(ci, CHECK_CAPS_AUTHONLY | CHECK_CAPS_NOINVAL);
+ ceph_check_caps(ci, flags | CHECK_CAPS_AUTHONLY | CHECK_CAPS_NOINVAL);
else if (check_caps == 2)
- ceph_check_caps(ci, CHECK_CAPS_NOINVAL);
+ ceph_check_caps(ci, flags | CHECK_CAPS_NOINVAL);
}
/*
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index b63b4cd9b5b6..6e817bf1337c 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -200,9 +200,10 @@ struct ceph_cap {
struct list_head caps_item;
};
-#define CHECK_CAPS_AUTHONLY 1 /* only check auth cap */
-#define CHECK_CAPS_FLUSH 2 /* flush any dirty caps */
-#define CHECK_CAPS_NOINVAL 4 /* don't invalidate pagecache */
+#define CHECK_CAPS_AUTHONLY 1 /* only check auth cap */
+#define CHECK_CAPS_FLUSH 2 /* flush any dirty caps */
+#define CHECK_CAPS_NOINVAL 4 /* don't invalidate pagecache */
+#define CHECK_CAPS_FLUSH_FORCE 8 /* force flush any caps */
struct ceph_cap_flush {
u64 tid;
diff --git a/fs/file.c b/fs/file.c
index a3b72aa64f11..a11e59b5d602 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -1248,6 +1248,7 @@ __releases(&files->file_lock)
* tables and this condition does not arise without those.
*/
fdt = files_fdtable(files);
+ fd = array_index_nospec(fd, fdt->max_fds);
tofree = fdt->fd[fd];
if (!tofree && fd_is_open(fd, fdt))
goto Ebusy;
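The one-line fix is the standard Spectre-v1 hardening pattern: after the architectural bounds check, array_index_nospec() clamps the index under speculation before it is used to index fdt->fd. Roughly (a sketch, not the actual fs/file.c flow):

#include <linux/fdtable.h>
#include <linux/nospec.h>

/* Bounds check first, then clamp the index so a mispredicted branch cannot
 * speculatively read past fdt->fd[]. Locking and RCU details omitted. */
static struct file *example_fd_to_file(struct fdtable *fdt, unsigned int fd)
{
	if (fd >= fdt->max_fds)
		return NULL;

	fd = array_index_nospec(fd, fdt->max_fds);
	return rcu_dereference_raw(fdt->fd[fd]);
}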
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 3497ede88aa0..9c6b7c97fa3c 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -275,7 +275,7 @@ void pstore_record_init(struct pstore_record *record,
* end of the buffer.
*/
static void pstore_dump(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason)
+ struct kmsg_dump_detail *detail)
{
struct kmsg_dump_iter iter;
unsigned long total = 0;
@@ -285,9 +285,9 @@ static void pstore_dump(struct kmsg_dumper *dumper,
int saved_ret = 0;
int ret;
- why = kmsg_dump_reason_str(reason);
+ why = kmsg_dump_reason_str(detail->reason);
- if (pstore_cannot_block_path(reason)) {
+ if (pstore_cannot_block_path(detail->reason)) {
if (!spin_trylock_irqsave(&psinfo->buf_lock, flags)) {
pr_err("dump skipped in %s path because of concurrent dump\n",
in_nmi() ? "NMI" : why);
@@ -311,7 +311,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
pstore_record_init(&record, psinfo);
record.type = PSTORE_TYPE_DMESG;
record.count = oopscount;
- record.reason = reason;
+ record.reason = detail->reason;
record.part = part;
record.buf = psinfo->buf;
@@ -352,7 +352,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
}
ret = psinfo->write(&record);
- if (ret == 0 && reason == KMSG_DUMP_OOPS) {
+ if (ret == 0 && detail->reason == KMSG_DUMP_OOPS) {
pstore_new_entry = 1;
pstore_timer_kick();
} else {
diff --git a/fs/smb/client/cifsfs.h b/fs/smb/client/cifsfs.h
index 62d5fee3e5eb..ca2bd204bcc5 100644
--- a/fs/smb/client/cifsfs.h
+++ b/fs/smb/client/cifsfs.h
@@ -147,6 +147,6 @@ extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
/* when changing internal version - update following two lines at same time */
-#define SMB3_PRODUCT_BUILD 49
-#define CIFS_VERSION "2.49"
+#define SMB3_PRODUCT_BUILD 50
+#define CIFS_VERSION "2.50"
#endif /* _CIFSFS_H */
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 8e86fec7dcd2..f6d1f075987f 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -1471,29 +1471,6 @@ struct cifs_io_parms {
struct TCP_Server_Info *server;
};
-struct cifs_aio_ctx {
- struct kref refcount;
- struct list_head list;
- struct mutex aio_mutex;
- struct completion done;
- struct iov_iter iter;
- struct kiocb *iocb;
- struct cifsFileInfo *cfile;
- struct bio_vec *bv;
- loff_t pos;
- unsigned int nr_pinned_pages;
- ssize_t rc;
- unsigned int len;
- unsigned int total_len;
- unsigned int bv_need_unpin; /* If ->bv[] needs unpinning */
- bool should_dirty;
- /*
- * Indicates if this aio_ctx is for direct_io,
- * If yes, iter is a copy of the user passed iov_iter
- */
- bool direct_io;
-};
-
struct cifs_io_request {
struct netfs_io_request rreq;
struct cifsFileInfo *cfile;
@@ -2010,7 +1987,6 @@ require use of the stronger protocol */
* cifsFileInfo->file_info_lock cifsFileInfo->count cifs_new_fileinfo
* ->invalidHandle initiate_cifs_search
* ->oplock_break_cancelled
- * cifs_aio_ctx->aio_mutex cifs_aio_ctx cifs_aio_ctx_alloc
****************************************************************************/
#ifdef DECLARE_GLOBALS_HERE
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index c15bb5ee7eb7..497bf3c447bc 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -619,8 +619,6 @@ int __cifs_calc_signature(struct smb_rqst *rqst,
struct shash_desc *shash);
enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
enum securityEnum);
-struct cifs_aio_ctx *cifs_aio_ctx_alloc(void);
-void cifs_aio_ctx_release(struct kref *refcount);
int cifs_alloc_hash(const char *name, struct shash_desc **sdesc);
void cifs_free_hash(struct shash_desc **sdesc);
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index 4a8aa1de9522..dd0afa23734c 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -1042,13 +1042,26 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data,
}
rc = -EOPNOTSUPP;
- switch ((data->reparse.tag = tag)) {
- case 0: /* SMB1 symlink */
+ data->reparse.tag = tag;
+ if (!data->reparse.tag) {
if (server->ops->query_symlink) {
rc = server->ops->query_symlink(xid, tcon,
cifs_sb, full_path,
&data->symlink_target);
}
+ if (rc == -EOPNOTSUPP)
+ data->reparse.tag = IO_REPARSE_TAG_INTERNAL;
+ }
+
+ switch (data->reparse.tag) {
+ case 0: /* SMB1 symlink */
+ break;
+ case IO_REPARSE_TAG_INTERNAL:
+ rc = 0;
+ if (le32_to_cpu(data->fi.Attributes) & ATTR_DIRECTORY) {
+ cifs_create_junction_fattr(fattr, sb);
+ goto out;
+ }
break;
case IO_REPARSE_TAG_MOUNT_POINT:
cifs_create_junction_fattr(fattr, sb);
diff --git a/fs/smb/client/ioctl.c b/fs/smb/client/ioctl.c
index 855ac5a62edf..44dbaf9929a4 100644
--- a/fs/smb/client/ioctl.c
+++ b/fs/smb/client/ioctl.c
@@ -170,7 +170,10 @@ static long smb_mnt_get_fsinfo(unsigned int xid, struct cifs_tcon *tcon,
static int cifs_shutdown(struct super_block *sb, unsigned long arg)
{
struct cifs_sb_info *sbi = CIFS_SB(sb);
+ struct tcon_link *tlink;
+ struct cifs_tcon *tcon;
__u32 flags;
+ int rc;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -178,14 +181,21 @@ static int cifs_shutdown(struct super_block *sb, unsigned long arg)
if (get_user(flags, (__u32 __user *)arg))
return -EFAULT;
- if (flags > CIFS_GOING_FLAGS_NOLOGFLUSH)
- return -EINVAL;
+ tlink = cifs_sb_tlink(sbi);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+ tcon = tlink_tcon(tlink);
+
+ trace_smb3_shutdown_enter(flags, tcon->tid);
+ if (flags > CIFS_GOING_FLAGS_NOLOGFLUSH) {
+ rc = -EINVAL;
+ goto shutdown_out_err;
+ }
if (cifs_forced_shutdown(sbi))
- return 0;
+ goto shutdown_good;
cifs_dbg(VFS, "shut down requested (%d)", flags);
-/* trace_cifs_shutdown(sb, flags);*/
/*
* see:
@@ -201,7 +211,8 @@ static int cifs_shutdown(struct super_block *sb, unsigned long arg)
*/
case CIFS_GOING_FLAGS_DEFAULT:
cifs_dbg(FYI, "shutdown with default flag not supported\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto shutdown_out_err;
/*
* FLAGS_LOGFLUSH is easy since it asks to write out metadata (not
* data) but metadata writes are not cached on the client, so can treat
@@ -210,11 +221,18 @@ static int cifs_shutdown(struct super_block *sb, unsigned long arg)
case CIFS_GOING_FLAGS_LOGFLUSH:
case CIFS_GOING_FLAGS_NOLOGFLUSH:
sbi->mnt_cifs_flags |= CIFS_MOUNT_SHUTDOWN;
- return 0;
+ goto shutdown_good;
default:
- return -EINVAL;
+ rc = -EINVAL;
+ goto shutdown_out_err;
}
+
+shutdown_good:
+ trace_smb3_shutdown_done(flags, tcon->tid);
return 0;
+shutdown_out_err:
+ trace_smb3_shutdown_err(rc, flags, tcon->tid);
+ return rc;
}
static int cifs_dump_full_key(struct cifs_tcon *tcon, struct smb3_full_key_debug_info __user *in)
diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
index 07c468ddb88a..b28ff62f1f15 100644
--- a/fs/smb/client/misc.c
+++ b/fs/smb/client/misc.c
@@ -995,60 +995,6 @@ parse_DFS_referrals_exit:
return rc;
}
-struct cifs_aio_ctx *
-cifs_aio_ctx_alloc(void)
-{
- struct cifs_aio_ctx *ctx;
-
- /*
- * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
- * to false so that we know when we have to unreference pages within
- * cifs_aio_ctx_release()
- */
- ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
- if (!ctx)
- return NULL;
-
- INIT_LIST_HEAD(&ctx->list);
- mutex_init(&ctx->aio_mutex);
- init_completion(&ctx->done);
- kref_init(&ctx->refcount);
- return ctx;
-}
-
-void
-cifs_aio_ctx_release(struct kref *refcount)
-{
- struct cifs_aio_ctx *ctx = container_of(refcount,
- struct cifs_aio_ctx, refcount);
-
- cifsFileInfo_put(ctx->cfile);
-
- /*
- * ctx->bv is only set if setup_aio_ctx_iter() was call successfuly
- * which means that iov_iter_extract_pages() was a success and thus
- * that we may have references or pins on pages that we need to
- * release.
- */
- if (ctx->bv) {
- if (ctx->should_dirty || ctx->bv_need_unpin) {
- unsigned int i;
-
- for (i = 0; i < ctx->nr_pinned_pages; i++) {
- struct page *page = ctx->bv[i].bv_page;
-
- if (ctx->should_dirty)
- set_page_dirty(page);
- if (ctx->bv_need_unpin)
- unpin_user_page(page);
- }
- }
- kvfree(ctx->bv);
- }
-
- kfree(ctx);
-}
-
/**
* cifs_alloc_hash - allocate hash and hash context together
* @name: The name of the crypto hash algo
diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c
index a0ffbda90733..689d8a506d45 100644
--- a/fs/smb/client/reparse.c
+++ b/fs/smb/client/reparse.c
@@ -505,6 +505,10 @@ bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb,
}
switch (tag) {
+ case IO_REPARSE_TAG_INTERNAL:
+ if (!(fattr->cf_cifsattrs & ATTR_DIRECTORY))
+ return false;
+ fallthrough;
case IO_REPARSE_TAG_DFS:
case IO_REPARSE_TAG_DFSR:
case IO_REPARSE_TAG_MOUNT_POINT:
diff --git a/fs/smb/client/reparse.h b/fs/smb/client/reparse.h
index 6b55d1df9e2f..2c0644bc4e65 100644
--- a/fs/smb/client/reparse.h
+++ b/fs/smb/client/reparse.h
@@ -12,6 +12,12 @@
#include "fs_context.h"
#include "cifsglob.h"
+/*
+ * Used only by cifs.ko to ignore reparse points from files when client or
+ * server doesn't support FSCTL_GET_REPARSE_POINT.
+ */
+#define IO_REPARSE_TAG_INTERNAL ((__u32)~0U)
+
static inline dev_t reparse_nfs_mkdev(struct reparse_posix_data *buf)
{
u64 v = le64_to_cpu(*(__le64 *)buf->DataBuffer);
@@ -78,10 +84,19 @@ static inline u32 reparse_mode_wsl_tag(mode_t mode)
static inline bool reparse_inode_match(struct inode *inode,
struct cifs_fattr *fattr)
{
+ struct cifsInodeInfo *cinode = CIFS_I(inode);
struct timespec64 ctime = inode_get_ctime(inode);
- return (CIFS_I(inode)->cifsAttrs & ATTR_REPARSE) &&
- CIFS_I(inode)->reparse_tag == fattr->cf_cifstag &&
+ /*
+ * Do not match reparse tags when client or server doesn't support
+ * FSCTL_GET_REPARSE_POINT. @fattr->cf_cifstag should contain correct
+ * reparse tag from query dir response but the client won't be able to
+ * read the reparse point data anyway. This spares us a revalidation.
+ */
+ if (cinode->reparse_tag != IO_REPARSE_TAG_INTERNAL &&
+ cinode->reparse_tag != fattr->cf_cifstag)
+ return false;
+ return (cinode->cifsAttrs & ATTR_REPARSE) &&
timespec64_equal(&ctime, &fattr->cf_ctime);
}
diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
index 5c02a12251c8..9f5bc41433c1 100644
--- a/fs/smb/client/smb2inode.c
+++ b/fs/smb/client/smb2inode.c
@@ -930,6 +930,8 @@ int smb2_query_path_info(const unsigned int xid,
switch (rc) {
case 0:
+ rc = parse_create_response(data, cifs_sb, &out_iov[0]);
+ break;
case -EOPNOTSUPP:
/*
* BB TODO: When support for special files added to Samba
@@ -948,7 +950,8 @@ int smb2_query_path_info(const unsigned int xid,
cmds[num_cmds++] = SMB2_OP_GET_REPARSE;
oparms = CIFS_OPARMS(cifs_sb, tcon, full_path,
- FILE_READ_ATTRIBUTES | FILE_READ_EA,
+ FILE_READ_ATTRIBUTES |
+ FILE_READ_EA | SYNCHRONIZE,
FILE_OPEN, create_options |
OPEN_REPARSE_POINT, ACL_NO_MODE);
cifs_get_readable_path(tcon, full_path, &cfile);
@@ -1256,7 +1259,8 @@ int smb2_query_reparse_point(const unsigned int xid,
cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
cifs_get_readable_path(tcon, full_path, &cfile);
- oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, FILE_READ_ATTRIBUTES,
+ oparms = CIFS_OPARMS(cifs_sb, tcon, full_path,
+ FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE,
FILE_OPEN, OPEN_REPARSE_POINT, ACL_NO_MODE);
rc = smb2_compound_op(xid, tcon, cifs_sb,
full_path, &oparms, &in_iov,
diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h
index 6b3bdfb97211..0f0c10c7ada7 100644
--- a/fs/smb/client/trace.h
+++ b/fs/smb/client/trace.h
@@ -1388,7 +1388,7 @@ DECLARE_EVENT_CLASS(smb3_ioctl_class,
__entry->command = command;
),
TP_printk("xid=%u fid=0x%llx ioctl cmd=0x%x",
- __entry->xid, __entry->fid, __entry->command)
+ __entry->xid, __entry->fid, __entry->command)
)
#define DEFINE_SMB3_IOCTL_EVENT(name) \
@@ -1400,9 +1400,58 @@ DEFINE_EVENT(smb3_ioctl_class, smb3_##name, \
DEFINE_SMB3_IOCTL_EVENT(ioctl);
+DECLARE_EVENT_CLASS(smb3_shutdown_class,
+ TP_PROTO(__u32 flags,
+ __u32 tid),
+ TP_ARGS(flags, tid),
+ TP_STRUCT__entry(
+ __field(__u32, flags)
+ __field(__u32, tid)
+ ),
+ TP_fast_assign(
+ __entry->flags = flags;
+ __entry->tid = tid;
+ ),
+ TP_printk("flags=0x%x tid=0x%x",
+ __entry->flags, __entry->tid)
+)
+
+#define DEFINE_SMB3_SHUTDOWN_EVENT(name) \
+DEFINE_EVENT(smb3_shutdown_class, smb3_##name, \
+ TP_PROTO(__u32 flags, \
+ __u32 tid), \
+ TP_ARGS(flags, tid))
+
+DEFINE_SMB3_SHUTDOWN_EVENT(shutdown_enter);
+DEFINE_SMB3_SHUTDOWN_EVENT(shutdown_done);
+DECLARE_EVENT_CLASS(smb3_shutdown_err_class,
+ TP_PROTO(int rc,
+ __u32 flags,
+ __u32 tid),
+ TP_ARGS(rc, flags, tid),
+ TP_STRUCT__entry(
+ __field(int, rc)
+ __field(__u32, flags)
+ __field(__u32, tid)
+ ),
+ TP_fast_assign(
+ __entry->rc = rc;
+ __entry->flags = flags;
+ __entry->tid = tid;
+ ),
+ TP_printk("rc=%d flags=0x%x tid=0x%x",
+ __entry->rc, __entry->flags, __entry->tid)
+)
+#define DEFINE_SMB3_SHUTDOWN_ERR_EVENT(name) \
+DEFINE_EVENT(smb3_shutdown_err_class, smb3_##name, \
+ TP_PROTO(int rc, \
+ __u32 flags, \
+ __u32 tid), \
+ TP_ARGS(rc, flags, tid))
+DEFINE_SMB3_SHUTDOWN_ERR_EVENT(shutdown_err);
DECLARE_EVENT_CLASS(smb3_credit_class,
TP_PROTO(__u64 currmid,
diff --git a/fs/xfs/libxfs/xfs_quota_defs.h b/fs/xfs/libxfs/xfs_quota_defs.h
index cb035da3f990..fb05f44f6c75 100644
--- a/fs/xfs/libxfs/xfs_quota_defs.h
+++ b/fs/xfs/libxfs/xfs_quota_defs.h
@@ -56,7 +56,7 @@ typedef uint8_t xfs_dqtype_t;
* And, of course, we also need to take into account the dquot log format item
* used to describe each dquot.
*/
-#define XFS_DQUOT_LOGRES(mp) \
+#define XFS_DQUOT_LOGRES \
((sizeof(struct xfs_dq_logformat) + sizeof(struct xfs_disk_dquot)) * 6)
#define XFS_IS_QUOTA_ON(mp) ((mp)->m_qflags & XFS_ALL_QUOTA_ACCT)
diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
index 3dc8f785bf29..45aaf169806a 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.c
+++ b/fs/xfs/libxfs/xfs_trans_resv.c
@@ -338,11 +338,11 @@ xfs_calc_write_reservation(
blksz);
t1 += adj;
t3 += adj;
- return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3);
+ return XFS_DQUOT_LOGRES + max3(t1, t2, t3);
}
t4 = xfs_calc_refcountbt_reservation(mp, 1);
- return XFS_DQUOT_LOGRES(mp) + max(t4, max3(t1, t2, t3));
+ return XFS_DQUOT_LOGRES + max(t4, max3(t1, t2, t3));
}
unsigned int
@@ -410,11 +410,11 @@ xfs_calc_itruncate_reservation(
xfs_refcountbt_block_count(mp, 4),
blksz);
- return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3);
+ return XFS_DQUOT_LOGRES + max3(t1, t2, t3);
}
t4 = xfs_calc_refcountbt_reservation(mp, 2);
- return XFS_DQUOT_LOGRES(mp) + max(t4, max3(t1, t2, t3));
+ return XFS_DQUOT_LOGRES + max(t4, max3(t1, t2, t3));
}
unsigned int
@@ -466,7 +466,7 @@ STATIC uint
xfs_calc_rename_reservation(
struct xfs_mount *mp)
{
- unsigned int overhead = XFS_DQUOT_LOGRES(mp);
+ unsigned int overhead = XFS_DQUOT_LOGRES;
struct xfs_trans_resv *resp = M_RES(mp);
unsigned int t1, t2, t3 = 0;
@@ -577,7 +577,7 @@ STATIC uint
xfs_calc_link_reservation(
struct xfs_mount *mp)
{
- unsigned int overhead = XFS_DQUOT_LOGRES(mp);
+ unsigned int overhead = XFS_DQUOT_LOGRES;
struct xfs_trans_resv *resp = M_RES(mp);
unsigned int t1, t2, t3 = 0;
@@ -641,7 +641,7 @@ STATIC uint
xfs_calc_remove_reservation(
struct xfs_mount *mp)
{
- unsigned int overhead = XFS_DQUOT_LOGRES(mp);
+ unsigned int overhead = XFS_DQUOT_LOGRES;
struct xfs_trans_resv *resp = M_RES(mp);
unsigned int t1, t2, t3 = 0;
@@ -729,7 +729,7 @@ xfs_calc_icreate_reservation(
struct xfs_mount *mp)
{
struct xfs_trans_resv *resp = M_RES(mp);
- unsigned int overhead = XFS_DQUOT_LOGRES(mp);
+ unsigned int overhead = XFS_DQUOT_LOGRES;
unsigned int t1, t2, t3 = 0;
t1 = xfs_calc_icreate_resv_alloc(mp);
@@ -747,7 +747,7 @@ STATIC uint
xfs_calc_create_tmpfile_reservation(
struct xfs_mount *mp)
{
- uint res = XFS_DQUOT_LOGRES(mp);
+ uint res = XFS_DQUOT_LOGRES;
res += xfs_calc_icreate_resv_alloc(mp);
return res + xfs_calc_iunlink_add_reservation(mp);
@@ -829,7 +829,7 @@ STATIC uint
xfs_calc_ifree_reservation(
struct xfs_mount *mp)
{
- return XFS_DQUOT_LOGRES(mp) +
+ return XFS_DQUOT_LOGRES +
xfs_calc_inode_res(mp, 1) +
xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
xfs_calc_iunlink_remove_reservation(mp) +
@@ -846,7 +846,7 @@ STATIC uint
xfs_calc_ichange_reservation(
struct xfs_mount *mp)
{
- return XFS_DQUOT_LOGRES(mp) +
+ return XFS_DQUOT_LOGRES +
xfs_calc_inode_res(mp, 1) +
xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
@@ -955,7 +955,7 @@ STATIC uint
xfs_calc_addafork_reservation(
struct xfs_mount *mp)
{
- return XFS_DQUOT_LOGRES(mp) +
+ return XFS_DQUOT_LOGRES +
xfs_calc_inode_res(mp, 1) +
xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
xfs_calc_buf_res(1, mp->m_dir_geo->blksize) +
@@ -1003,7 +1003,7 @@ STATIC uint
xfs_calc_attrsetm_reservation(
struct xfs_mount *mp)
{
- return XFS_DQUOT_LOGRES(mp) +
+ return XFS_DQUOT_LOGRES +
xfs_calc_inode_res(mp, 1) +
xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
xfs_calc_buf_res(XFS_DA_NODE_MAXDEPTH, XFS_FSB_TO_B(mp, 1));
@@ -1043,7 +1043,7 @@ STATIC uint
xfs_calc_attrrm_reservation(
struct xfs_mount *mp)
{
- return XFS_DQUOT_LOGRES(mp) +
+ return XFS_DQUOT_LOGRES +
max((xfs_calc_inode_res(mp, 1) +
xfs_calc_buf_res(XFS_DA_NODE_MAXDEPTH,
XFS_FSB_TO_B(mp, 1)) +
diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c
index 0dbc484b182f..2f98d90d7fd6 100644
--- a/fs/xfs/scrub/agheader_repair.c
+++ b/fs/xfs/scrub/agheader_repair.c
@@ -696,7 +696,7 @@ xrep_agfl_init_header(
* step.
*/
xagb_bitmap_init(&af.used_extents);
- af.agfl_bno = xfs_buf_to_agfl_bno(agfl_bp),
+ af.agfl_bno = xfs_buf_to_agfl_bno(agfl_bp);
xagb_bitmap_walk(agfl_extents, xrep_agfl_fill, &af);
error = xagb_bitmap_disunion(agfl_extents, &af.used_extents);
if (error)
diff --git a/fs/xfs/scrub/parent.c b/fs/xfs/scrub/parent.c
index 733c410a2279..91e7b51ce068 100644
--- a/fs/xfs/scrub/parent.c
+++ b/fs/xfs/scrub/parent.c
@@ -799,7 +799,7 @@ xchk_parent_pptr(
}
if (pp->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
- goto out_pp;
+ goto out_names;
/*
* Complain if the number of parent pointers doesn't match the link
diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h
index 92ef4cdc486e..c886d5d0eb02 100644
--- a/fs/xfs/scrub/trace.h
+++ b/fs/xfs/scrub/trace.h
@@ -959,18 +959,16 @@ TRACE_EVENT(xfile_create,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(unsigned long, ino)
- __array(char, pathname, 256)
+ __array(char, pathname, MAXNAMELEN)
),
TP_fast_assign(
- char pathname[257];
char *path;
__entry->ino = file_inode(xf->file)->i_ino;
- memset(pathname, 0, sizeof(pathname));
- path = file_path(xf->file, pathname, sizeof(pathname) - 1);
+ path = file_path(xf->file, __entry->pathname, MAXNAMELEN);
if (IS_ERR(path))
- path = "(unknown)";
- strncpy(__entry->pathname, path, sizeof(__entry->pathname));
+ strncpy(__entry->pathname, "(unknown)",
+ sizeof(__entry->pathname));
),
TP_printk("xfino 0x%lx path '%s'",
__entry->ino,
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index 5c947e5ce8b8..7db386304875 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -139,7 +139,7 @@ xfs_attr_shortform_list(
sbp->name = sfe->nameval;
sbp->namelen = sfe->namelen;
/* These are bytes, and both on-disk, don't endian-flip */
- sbp->value = &sfe->nameval[sfe->namelen],
+ sbp->value = &sfe->nameval[sfe->namelen];
sbp->valuelen = sfe->valuelen;
sbp->flags = sfe->flags;
sbp->hash = xfs_attr_hashval(dp->i_mount, sfe->flags,
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 5646d300b286..180ce697305a 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -4715,20 +4715,18 @@ TRACE_EVENT(xmbuf_create,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(unsigned long, ino)
- __array(char, pathname, 256)
+ __array(char, pathname, MAXNAMELEN)
),
TP_fast_assign(
- char pathname[257];
char *path;
struct file *file = btp->bt_file;
__entry->dev = btp->bt_mount->m_super->s_dev;
__entry->ino = file_inode(file)->i_ino;
- memset(pathname, 0, sizeof(pathname));
- path = file_path(file, pathname, sizeof(pathname) - 1);
+ path = file_path(file, __entry->pathname, MAXNAMELEN);
if (IS_ERR(path))
- path = "(unknown)";
- strncpy(__entry->pathname, path, sizeof(__entry->pathname));
+ strncpy(__entry->pathname, "(unknown)",
+ sizeof(__entry->pathname));
),
TP_printk("dev %d:%d xmino 0x%lx path '%s'",
MAJOR(__entry->dev), MINOR(__entry->dev),
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index ab3d22f662f2..eaf849260bd6 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -110,7 +110,24 @@ xfs_attr_change(
args->whichfork = XFS_ATTR_FORK;
xfs_attr_sethash(args);
- return xfs_attr_set(args, op, args->attr_filter & XFS_ATTR_ROOT);
+ /*
+ * Some xattrs must be resistant to allocation failure at ENOSPC, e.g.
+ * creating an inode with ACLs or security attributes requires the
+ * allocation of the xattr holding that information to succeed. Hence
+ * we allow xattrs in the VFS TRUSTED, SYSTEM, POSIX_ACL and SECURITY
+ * (LSM xattr) namespaces to dip into the reserve block pool to allow
+ * manipulation of these xattrs when at ENOSPC. These VFS xattr
+ * namespaces translate to the XFS_ATTR_ROOT and XFS_ATTR_SECURE on-disk
+ * namespaces.
+ *
+ * For most of these cases, these special xattrs will fit in the inode
+ * itself and so consume no extra space or only require temporary extra
+ * space while an overwrite is being made. Hence the use of the reserved
+ * pool is largely to avoid the worst case reservation from preventing
+ * the xattr from being created at ENOSPC.
+ */
+ return xfs_attr_set(args, op,
+ args->attr_filter & (XFS_ATTR_ROOT | XFS_ATTR_SECURE));
}
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index ad6afc5c4918..1ae44793132a 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -911,13 +911,12 @@
#define CON_INITCALL \
BOUNDED_SECTION_POST_LABEL(.con_initcall.init, __con_initcall, _start, _end)
-#define RUNTIME_NAME(t,x) runtime_##t##_##x
+#define NAMED_SECTION(name) \
+ . = ALIGN(8); \
+ name : AT(ADDR(name) - LOAD_OFFSET) \
+ { BOUNDED_SECTION_PRE_LABEL(name, name, __start_, __stop_) }
-#define RUNTIME_CONST(t,x) \
- . = ALIGN(8); \
- RUNTIME_NAME(t,x) : AT(ADDR(RUNTIME_NAME(t,x)) - LOAD_OFFSET) { \
- *(RUNTIME_NAME(t,x)); \
- }
+#define RUNTIME_CONST(t,x) NAMED_SECTION(runtime_##t##_##x)
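The rework keeps the RUNTIME_CONST() sections but routes them through a generic NAMED_SECTION() helper, which additionally emits __start_/__stop_ boundary symbols through BOUNDED_SECTION_PRE_LABEL(). As a rough sketch of the expansion (assuming the usual BOUNDED_SECTION_PRE_LABEL behaviour), RUNTIME_CONST(shift, d_hash_shift) now produces something like:

/*
 *	. = ALIGN(8);
 *	runtime_shift_d_hash_shift : AT(ADDR(runtime_shift_d_hash_shift) - LOAD_OFFSET) {
 *		__start_runtime_shift_d_hash_shift = .;
 *		KEEP(*(runtime_shift_d_hash_shift))
 *		__stop_runtime_shift_d_hash_shift = .;
 *	}
 */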
/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
#define KUNIT_TABLE() \
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index 173548c6473a..a6f8b098c56f 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -1543,6 +1543,10 @@ enum drm_dp_phy {
#define DP_SYMBOL_ERROR_COUNT_LANE2_PHY_REPEATER1 0xf0039 /* 1.3 */
#define DP_SYMBOL_ERROR_COUNT_LANE3_PHY_REPEATER1 0xf003b /* 1.3 */
+#define DP_OUI_PHY_REPEATER1 0xf003d /* 1.3 */
+#define DP_OUI_PHY_REPEATER(dp_phy) \
+ DP_LTTPR_REG(dp_phy, DP_OUI_PHY_REPEATER1)
+
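DP_OUI_PHY_REPEATER() just rebases the per-repeater OUI register through DP_LTTPR_REG(), so it can be handed to the usual DPCD accessors. A hypothetical sketch:

#include <drm/display/drm_dp_helper.h>

/* Read the 3-byte OUI of a given LTTPR; hypothetical helper, error handling
 * kept minimal for illustration. */
static int example_read_lttpr_oui(struct drm_dp_aux *aux,
				  enum drm_dp_phy dp_phy, u8 oui[3])
{
	ssize_t ret;

	ret = drm_dp_dpcd_read(aux, DP_OUI_PHY_REPEATER(dp_phy), oui, 3);
	if (ret < 0)
		return ret;

	return ret == 3 ? 0 : -EIO;
}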
#define __DP_FEC1_BASE 0xf0290 /* 1.4 */
#define __DP_FEC2_BASE 0xf0298 /* 1.4 */
#define DP_FEC_BASE(dp_phy) \
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
index ea03e1dd26ba..279624833ea9 100644
--- a/include/drm/display/drm_dp_helper.h
+++ b/include/drm/display/drm_dp_helper.h
@@ -112,6 +112,7 @@ struct drm_dp_vsc_sdp {
* @target_rr: Target Refresh
* @duration_incr_ms: Successive frame duration increase
* @duration_decr_ms: Successive frame duration decrease
+ * @target_rr_divider: Target refresh rate divider
* @mode: Adaptive Sync Operation Mode
*/
struct drm_dp_as_sdp {
@@ -657,6 +658,8 @@ struct drm_dp_desc {
int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
bool is_branch);
+int drm_dp_dump_lttpr_desc(struct drm_dp_aux *aux, enum drm_dp_phy dp_phy);
+
/**
* enum drm_dp_quirk - Display Port sink/branch device specific quirks
*
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index c754651044d4..e3fa43291f44 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -471,14 +471,6 @@ enum drm_privacy_screen_status {
*
* DP definitions come from the DP v2.0 spec
* HDMI definitions come from the CTA-861-H spec
- *
- * A note on YCC and RGB variants:
- *
- * Since userspace is not aware of the encoding on the wire
- * (RGB or YCbCr), drivers are free to pick the appropriate
- * variant, regardless of what userspace selects. E.g., if
- * BT2020_RGB is selected by userspace a driver will pick
- * BT2020_YCC if the encoding on the wire is YUV444 or YUV420.
*
* @DRM_MODE_COLORIMETRY_DEFAULT:
* Driver specific behavior.
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 63767cf24371..c91f87b5242d 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -213,8 +213,9 @@ struct drm_device {
* This can be set to true if the hardware has a working vblank counter
* with high-precision timestamping (otherwise there are races) and the
* driver uses drm_crtc_vblank_on() and drm_crtc_vblank_off()
- * appropriately. See also @max_vblank_count and
- * &drm_crtc_funcs.get_vblank_counter.
+ * appropriately. Also, see @max_vblank_count,
+ * &drm_crtc_funcs.get_vblank_counter and
+ * &drm_vblank_crtc_config.disable_immediate.
*/
bool vblank_disable_immediate;
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index c8f829b4307c..151ab1e85b1b 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -79,6 +79,31 @@ struct drm_pending_vblank_event {
};
/**
+ * struct drm_vblank_crtc_config - vblank configuration for a CRTC
+ */
+struct drm_vblank_crtc_config {
+ /**
+ * @offdelay_ms: Vblank off delay in ms, used to determine how long
+ * &drm_vblank_crtc.disable_timer waits before disabling.
+ *
+ * Defaults to the value of drm_vblank_offdelay in drm_crtc_vblank_on().
+ */
+ int offdelay_ms;
+
+ /**
+ * @disable_immediate: See &drm_device.vblank_disable_immediate
+ * for the exact semantics of immediate vblank disabling.
+ *
+ * Additionally, this tracks the disable immediate value per crtc, just
+ * in case it needs to differ from the default value for a given device.
+ *
+ * Defaults to the value of &drm_device.vblank_disable_immediate in
+ * drm_crtc_vblank_on().
+ */
+ bool disable_immediate;
+};
+
+/**
* struct drm_vblank_crtc - vblank tracking for a CRTC
*
* This structure tracks the vblank state for one CRTC.
@@ -99,8 +124,8 @@ struct drm_vblank_crtc {
wait_queue_head_t queue;
/**
* @disable_timer: Disable timer for the delayed vblank disabling
- * hysteresis logic. Vblank disabling is controlled through the
- * drm_vblank_offdelay module option and the setting of the
+ * hysteresis logic. Vblank disabling is controlled through
+ * &drm_vblank_crtc_config.offdelay_ms and the setting of the
* &drm_device.max_vblank_count value.
*/
struct timer_list disable_timer;
@@ -199,6 +224,12 @@ struct drm_vblank_crtc {
struct drm_display_mode hwmode;
/**
+ * @config: Stores vblank configuration values for a given CRTC.
+ * Also, see drm_crtc_vblank_on_config().
+ */
+ struct drm_vblank_crtc_config config;
+
+ /**
* @enabled: Tracks the enabling state of the corresponding &drm_crtc to
* avoid double-disabling and hence corrupting saved state. Needed by
* drivers not using atomic KMS, since those might go through their CRTC
@@ -247,6 +278,8 @@ void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
void drm_crtc_vblank_off(struct drm_crtc *crtc);
void drm_crtc_vblank_reset(struct drm_crtc *crtc);
+void drm_crtc_vblank_on_config(struct drm_crtc *crtc,
+ const struct drm_vblank_crtc_config *config);
void drm_crtc_vblank_on(struct drm_crtc *crtc);
u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
void drm_crtc_vblank_restore(struct drm_crtc *crtc);
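drm_crtc_vblank_on_config() lets a driver choose the off-delay and immediate-disable behaviour per CRTC instead of inheriting the module-wide defaults. A hypothetical driver hook as a sketch (assuming the usual offdelay convention where 0 means the vblank interrupt is never auto-disabled):

#include <drm/drm_atomic.h>
#include <drm/drm_vblank.h>

static void example_crtc_atomic_enable(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	struct drm_vblank_crtc_config config = {
		.offdelay_ms = 0,		/* keep the vblank irq armed */
		.disable_immediate = false,
	};

	drm_crtc_vblank_on_config(crtc, &config);
}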
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 5acc64954a88..fe8edb917360 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -579,7 +579,7 @@ bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
-void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
+void drm_sched_start(struct drm_gpu_scheduler *sched);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
void drm_sched_reset_karma(struct drm_sched_job *bad);
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index ef0f52f56ebc..d1a732d56259 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -194,6 +194,41 @@ struct ttm_operation_ctx {
uint64_t bytes_moved;
};
+struct ttm_lru_walk;
+
+/** struct ttm_lru_walk_ops - Operations for a LRU walk. */
+struct ttm_lru_walk_ops {
+ /**
+ * process_bo - Process this bo.
+ * @walk: struct ttm_lru_walk describing the walk.
+ * @bo: A locked and referenced buffer object.
+ *
+ * Return: Negative error code on error, User-defined positive value
+ * (typically, but not always, size of the processed bo) on success.
+ * On success, the returned values are summed by the walk and the
+ * walk exits when its target is met.
+ * 0 also indicates success, -EBUSY means this bo was skipped.
+ */
+ s64 (*process_bo)(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo);
+};
+
+/**
+ * struct ttm_lru_walk - Structure describing a LRU walk.
+ */
+struct ttm_lru_walk {
+ /** @ops: Pointer to the ops structure. */
+ const struct ttm_lru_walk_ops *ops;
+ /** @ctx: Pointer to the struct ttm_operation_ctx. */
+ struct ttm_operation_ctx *ctx;
+ /** @ticket: The struct ww_acquire_ctx if any. */
+ struct ww_acquire_ctx *ticket;
+ /** @trylock_only: Only use trylock for locking. */
+ bool trylock_only;
+};
+
+s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
+ struct ttm_resource_manager *man, s64 target);
+
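A user of the walk interface supplies a process_bo() callback that receives locked, referenced BOs and returns either a byte count to add towards the target, 0, or -EBUSY to skip. A hypothetical op as a sketch:

#include <drm/ttm/ttm_bo.h>

/* Skip pinned BOs, otherwise report the BO size so the walk can stop once
 * the requested number of bytes has been processed. Hypothetical example. */
static s64 example_process_bo(struct ttm_lru_walk *walk,
			      struct ttm_buffer_object *bo)
{
	if (bo->pin_count)
		return -EBUSY;		/* skipped, walk continues */

	return bo->base.size;		/* summed against the walk target */
}

static const struct ttm_lru_walk_ops example_walk_ops = {
	.process_bo = example_process_bo,
};

The caller then fills a struct ttm_lru_walk with these ops, an operation context and optionally trylock_only, and hands it to ttm_lru_walk_for_evict() together with a byte target.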
/**
* ttm_bo_get - reference a struct ttm_buffer_object
*
@@ -382,15 +417,14 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
-int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
- gfp_t gfp_flags);
+s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
+ struct ttm_resource_manager *man, gfp_t gfp_flags,
+ s64 target);
void ttm_bo_pin(struct ttm_buffer_object *bo);
void ttm_bo_unpin(struct ttm_buffer_object *bo);
-int ttm_mem_evict_first(struct ttm_device *bdev,
- struct ttm_resource_manager *man,
- const struct ttm_place *place,
- struct ttm_operation_ctx *ctx,
- struct ww_acquire_ctx *ticket);
+int ttm_bo_evict_first(struct ttm_device *bdev,
+ struct ttm_resource_manager *man,
+ struct ttm_operation_ctx *ctx);
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
struct vm_fault *vmf);
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 69769355139f..be034be56ba1 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -49,6 +49,43 @@ struct io_mapping;
struct sg_table;
struct scatterlist;
+/**
+ * enum ttm_lru_item_type - enumerate ttm_lru_item subclasses
+ */
+enum ttm_lru_item_type {
+ /** @TTM_LRU_RESOURCE: The resource subclass */
+ TTM_LRU_RESOURCE,
+ /** @TTM_LRU_HITCH: The iterator hitch subclass */
+ TTM_LRU_HITCH
+};
+
+/**
+ * struct ttm_lru_item - The TTM lru list node base class
+ * @link: The list link
+ * @type: The subclass type
+ */
+struct ttm_lru_item {
+ struct list_head link;
+ enum ttm_lru_item_type type;
+};
+
+/**
+ * ttm_lru_item_init() - initialize a struct ttm_lru_item
+ * @item: The item to initialize
+ * @type: The subclass type
+ */
+static inline void ttm_lru_item_init(struct ttm_lru_item *item,
+ enum ttm_lru_item_type type)
+{
+ item->type = type;
+ INIT_LIST_HEAD(&item->link);
+}
+
+static inline bool ttm_lru_item_is_res(const struct ttm_lru_item *item)
+{
+ return item->type == TTM_LRU_RESOURCE;
+}
+
struct ttm_resource_manager_func {
/**
* struct ttm_resource_manager_func member alloc
@@ -217,19 +254,20 @@ struct ttm_resource {
/**
* @lru: Least recently used list, see &ttm_resource_manager.lru
*/
- struct list_head lru;
+ struct ttm_lru_item lru;
};
/**
- * struct ttm_resource_cursor
+ * ttm_lru_item_to_res() - Downcast a struct ttm_lru_item to a struct ttm_resource
+ * @item: The struct ttm_lru_item to downcast
*
- * @priority: the current priority
- *
- * Cursor to iterate over the resources in a manager.
+ * Return: Pointer to the embedding struct ttm_resource
*/
-struct ttm_resource_cursor {
- unsigned int priority;
-};
+static inline struct ttm_resource *
+ttm_lru_item_to_res(struct ttm_lru_item *item)
+{
+ return container_of(item, struct ttm_resource, lru);
+}
/**
* struct ttm_lru_bulk_move_pos
@@ -246,8 +284,9 @@ struct ttm_lru_bulk_move_pos {
/**
* struct ttm_lru_bulk_move
- *
* @pos: first/last lru entry for resources in the each domain/priority
+ * @cursor_list: The list of cursors currently traversing any of
+ * the sublists of @pos. Protected by the ttm device's lru_lock.
*
* Container for the current bulk move state. Should be used with
* ttm_lru_bulk_move_init() and ttm_bo_set_bulk_move().
@@ -257,9 +296,38 @@ struct ttm_lru_bulk_move_pos {
*/
struct ttm_lru_bulk_move {
struct ttm_lru_bulk_move_pos pos[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY];
+ struct list_head cursor_list;
};
/**
+ * struct ttm_resource_cursor
+ * @man: The resource manager currently being iterated over
+ * @hitch: A hitch list node inserted before the next resource
+ * to iterate over.
+ * @bulk_link: A list link for the list of cursors traversing the
+ * bulk sublist of @bulk. Protected by the ttm device's lru_lock.
+ * @bulk: Pointer to struct ttm_lru_bulk_move whose subrange @hitch is
+ * inserted to. NULL if none. Never dereference this pointer since
+ * the struct ttm_lru_bulk_move object pointed to might have been
+ * freed. The pointer is only for comparison.
+ * @mem_type: The memory type of the LRU list being traversed.
+ * This field is valid iff @bulk != NULL.
+ * @priority: the current priority
+ *
+ * Cursor to iterate over the resources in a manager.
+ */
+struct ttm_resource_cursor {
+ struct ttm_resource_manager *man;
+ struct ttm_lru_item hitch;
+ struct list_head bulk_link;
+ struct ttm_lru_bulk_move *bulk;
+ unsigned int mem_type;
+ unsigned int priority;
+};
+
+void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor);
+
+/**
* struct ttm_kmap_iter_iomap - Specialization for a struct io_mapping +
* struct sg_table backed struct ttm_resource.
* @base: Embedded struct ttm_kmap_iter providing the usage interface.
@@ -347,6 +415,8 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk);
void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk);
+void ttm_lru_bulk_move_fini(struct ttm_device *bdev,
+ struct ttm_lru_bulk_move *bulk);
void ttm_resource_add_bulk_move(struct ttm_resource *res,
struct ttm_buffer_object *bo);
@@ -389,9 +459,10 @@ struct ttm_resource *
ttm_resource_manager_first(struct ttm_resource_manager *man,
struct ttm_resource_cursor *cursor);
struct ttm_resource *
-ttm_resource_manager_next(struct ttm_resource_manager *man,
- struct ttm_resource_cursor *cursor,
- struct ttm_resource *res);
+ttm_resource_manager_next(struct ttm_resource_cursor *cursor);
+
+struct ttm_resource *
+ttm_lru_first_res_or_null(struct list_head *head);
/**
* ttm_resource_manager_for_each_res - iterate over all resources
@@ -403,7 +474,7 @@ ttm_resource_manager_next(struct ttm_resource_manager *man,
*/
#define ttm_resource_manager_for_each_res(man, cursor, res) \
for (res = ttm_resource_manager_first(man, cursor); res; \
- res = ttm_resource_manager_next(man, cursor, res))
+ res = ttm_resource_manager_next(cursor))
struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 2594553bb30b..2df665fa2964 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -297,6 +297,15 @@ static inline void *offset_to_ptr(const int *off)
#define is_unsigned_type(type) (!is_signed_type(type))
/*
+ * Useful shorthand for "is this condition known at compile-time?"
+ *
+ * Note that the condition may involve non-constant values,
+ * but the compiler may know enough about the details of the
+ * values to determine that the condition is statically true.
+ */
+#define statically_true(x) (__builtin_constant_p(x) && (x))
+
+/*
* This is needed in functions which generate the stack canary, see
* arch/x86/kernel/smpboot.c::start_secondary() for an example.
*/
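statically_true() is useful wherever a branch should disappear when the compiler can already prove the condition, even though the expression is not formally a constant expression. A hypothetical helper as a sketch:

#include <linux/compiler.h>
#include <linux/hash.h>

/* Let the compiler drop the shift when it can prove bits == 32; falls back
 * to the general form otherwise. Hypothetical example. */
static inline u32 example_hash(u32 val, unsigned int bits)
{
	if (statically_true(bits == 32))
		return val * GOLDEN_RATIO_32;

	return (val * GOLDEN_RATIO_32) >> (32 - bits);
}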
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 51ba681b915a..9316c39260e0 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -100,7 +100,6 @@ enum cpuhp_state {
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,
- CPUHP_PROFILE_PREPARE,
CPUHP_X2APIC_PREPARE,
CPUHP_SMPCFD_PREPARE,
CPUHP_RELAY_PREPARE,
@@ -148,6 +147,7 @@ enum cpuhp_state {
CPUHP_AP_IRQ_LOONGARCH_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
CPUHP_AP_IRQ_RISCV_IMSIC_STARTING,
+ CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
index 064bad725061..27d15f60950a 100644
--- a/include/linux/dma-heap.h
+++ b/include/linux/dma-heap.h
@@ -9,14 +9,13 @@
#ifndef _DMA_HEAPS_H
#define _DMA_HEAPS_H
-#include <linux/cdev.h>
#include <linux/types.h>
struct dma_heap;
/**
* struct dma_heap_ops - ops to operate on a given heap
- * @allocate: allocate dmabuf and return struct dma_buf ptr
+ * @allocate: allocate dmabuf and return struct dma_buf ptr
*
* allocate returns dmabuf on success, ERR_PTR(-errno) on error.
*/
@@ -41,28 +40,10 @@ struct dma_heap_export_info {
void *priv;
};
-/**
- * dma_heap_get_drvdata() - get per-heap driver data
- * @heap: DMA-Heap to retrieve private data for
- *
- * Returns:
- * The per-heap data for the heap.
- */
void *dma_heap_get_drvdata(struct dma_heap *heap);
-/**
- * dma_heap_get_name() - get heap name
- * @heap: DMA-Heap to retrieve private data for
- *
- * Returns:
- * The char* for the heap name.
- */
const char *dma_heap_get_name(struct dma_heap *heap);
-/**
- * dma_heap_add - adds a heap to dmabuf heaps
- * @exp_info: information needed to register this heap
- */
struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info);
#endif /* _DMA_HEAPS_H */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index db7d97b10964..865dad03e73e 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -510,6 +510,7 @@ struct fb_info {
void *par;
bool skip_vt_switch; /* no VT switch on suspend/resume required */
+ bool skip_panic; /* Do not write to the fb after a panic */
};
/* This will go away
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
index 906521c2329c..6055fc969877 100644
--- a/include/linux/kmsg_dump.h
+++ b/include/linux/kmsg_dump.h
@@ -40,6 +40,17 @@ struct kmsg_dump_iter {
};
/**
+ * struct kmsg_dump_detail - kernel crash detail
+ * @reason: reason for the crash, see kmsg_dump_reason.
+ * @description: optional short string, to provide additional information.
+ */
+
+struct kmsg_dump_detail {
+ enum kmsg_dump_reason reason;
+ const char *description;
+};
+
+/**
* struct kmsg_dumper - kernel crash message dumper structure
* @list: Entry in the dumper list (private)
* @dump: Call into dumping code which will retrieve the data with
@@ -49,13 +60,13 @@ struct kmsg_dump_iter {
*/
struct kmsg_dumper {
struct list_head list;
- void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason);
+ void (*dump)(struct kmsg_dumper *dumper, struct kmsg_dump_detail *detail);
enum kmsg_dump_reason max_reason;
bool registered;
};
#ifdef CONFIG_PRINTK
-void kmsg_dump(enum kmsg_dump_reason reason);
+void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc);
bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
char *line, size_t size, size_t *len);
@@ -71,7 +82,7 @@ int kmsg_dump_unregister(struct kmsg_dumper *dumper);
const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason);
#else
-static inline void kmsg_dump(enum kmsg_dump_reason reason)
+static inline void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc)
{
}
@@ -107,4 +118,9 @@ static inline const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason)
}
#endif
+static inline void kmsg_dump(enum kmsg_dump_reason reason)
+{
+ kmsg_dump_desc(reason, NULL);
+}
+
#endif /* _LINUX_KMSG_DUMP_H */
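With the callback now taking a struct kmsg_dump_detail, dumpers read the reason (and an optional description, which may be NULL) from the detail argument rather than a bare enum. A hypothetical dumper against the new signature (sketch only; a real dumper would write somewhere safer than the console):

#include <linux/kmsg_dump.h>
#include <linux/printk.h>

static void example_dump(struct kmsg_dumper *dumper,
			 struct kmsg_dump_detail *detail)
{
	struct kmsg_dump_iter iter;
	char line[256];
	size_t len;

	kmsg_dump_rewind(&iter);
	while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len))
		pr_info("dump(%s): %.*s",
			kmsg_dump_reason_str(detail->reason), (int)len, line);
}

static struct kmsg_dumper example_dumper = {
	.dump		= example_dump,
	.max_reason	= KMSG_DUMP_OOPS,
};

/* registered from init code with kmsg_dump_register(&example_dumper) */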
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 689e8be873a7..79a6b1a63027 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2414,7 +2414,7 @@ static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn
}
bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
- unsigned long attrs);
+ unsigned long mask, unsigned long attrs);
bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
struct kvm_gfn_range *range);
bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
@@ -2445,11 +2445,11 @@ static inline int kvm_gmem_get_pfn(struct kvm *kvm,
}
#endif /* CONFIG_KVM_PRIVATE_MEM */
-#ifdef CONFIG_HAVE_KVM_GMEM_PREPARE
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order);
-bool kvm_arch_gmem_prepare_needed(struct kvm *kvm);
#endif
+#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM
/**
* kvm_gmem_populate() - Populate/prepare a GPA range with guest data
*
@@ -2476,8 +2476,9 @@ typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages,
kvm_gmem_populate_cb post_populate, void *opaque);
+#endif
-#ifdef CONFIG_HAVE_KVM_GMEM_INVALIDATE
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
#endif
diff --git a/include/linux/minmax.h b/include/linux/minmax.h
index 9c2848abc804..98008dd92153 100644
--- a/include/linux/minmax.h
+++ b/include/linux/minmax.h
@@ -26,19 +26,63 @@
#define __typecheck(x, y) \
(!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
-/* is_signed_type() isn't a constexpr for pointer types */
-#define __is_signed(x) \
- __builtin_choose_expr(__is_constexpr(is_signed_type(typeof(x))), \
- is_signed_type(typeof(x)), 0)
+/*
+ * __sign_use for integer expressions:
+ * bit #0 set if ok for unsigned comparisons
+ * bit #1 set if ok for signed comparisons
+ *
+ * In particular, statically non-negative signed integer
+ * expressions are ok for both.
+ *
+ * NOTE! Unsigned types smaller than 'int' are implicitly
+ * converted to 'int' in expressions, and are accepted for
+ * signed conversions for now. This is debatable.
+ *
+ * Note that 'x' is the original expression, and 'ux' is
+ * the unique variable that contains the value.
+ *
+ * We use 'ux' for pure type checking, and 'x' for when
+ * we need to look at the value (but without evaluating
+ * it for side effects! Careful to only ever evaluate it
+ * with sizeof() or __builtin_constant_p() etc).
+ *
+ * Pointers end up being checked by the normal C type
+ * rules at the actual comparison, and these expressions
+ * only need to be careful to not cause warnings for
+ * pointer use.
+ */
+#define __signed_type_use(x,ux) (2+__is_nonneg(x,ux))
+#define __unsigned_type_use(x,ux) (1+2*(sizeof(ux)<4))
+#define __sign_use(x,ux) (is_signed_type(typeof(ux))? \
+ __signed_type_use(x,ux):__unsigned_type_use(x,ux))
-/* True for a non-negative signed int constant */
-#define __is_noneg_int(x) \
- (__builtin_choose_expr(__is_constexpr(x) && __is_signed(x), x, -1) >= 0)
+/*
+ * To avoid warnings about casting pointers to integers
+ * of different sizes, we need that special sign type.
+ *
+ * On 64-bit we can just always use 'long', since any
+ * integer or pointer type can just be cast to that.
+ *
+ * This does not work for 128-bit signed integers since
+ * the cast would truncate them, but we do not use s128
+ * types in the kernel (we do use 'u128', but they will
+ * be handled by the !is_signed_type() case).
+ *
+ * NOTE! The cast is there only to avoid any warnings
+ * from when values that aren't signed integer types.
+ */
+#ifdef CONFIG_64BIT
+ #define __signed_type(ux) long
+#else
+ #define __signed_type(ux) typeof(__builtin_choose_expr(sizeof(ux)>4,1LL,1L))
+#endif
+#define __is_nonneg(x,ux) statically_true((__signed_type(ux))(x)>=0)
+
+#define __types_ok(x,y,ux,uy) \
+ (__sign_use(x,ux) & __sign_use(y,uy))
-#define __types_ok(x, y) \
- (__is_signed(x) == __is_signed(y) || \
- __is_signed((x) + 0) == __is_signed((y) + 0) || \
- __is_noneg_int(x) || __is_noneg_int(y))
+#define __types_ok3(x,y,z,ux,uy,uz) \
+ (__sign_use(x,ux) & __sign_use(y,uy) & __sign_use(z,uz))
#define __cmp_op_min <
#define __cmp_op_max >
@@ -51,34 +95,31 @@
#define __cmp_once(op, type, x, y) \
__cmp_once_unique(op, type, x, y, __UNIQUE_ID(x_), __UNIQUE_ID(y_))
-#define __careful_cmp_once(op, x, y) ({ \
- static_assert(__types_ok(x, y), \
- #op "(" #x ", " #y ") signedness error, fix types or consider u" #op "() before " #op "_t()"); \
- __cmp_once(op, __auto_type, x, y); })
+#define __careful_cmp_once(op, x, y, ux, uy) ({ \
+ __auto_type ux = (x); __auto_type uy = (y); \
+ BUILD_BUG_ON_MSG(!__types_ok(x,y,ux,uy), \
+ #op"("#x", "#y") signedness error"); \
+ __cmp(op, ux, uy); })
-#define __careful_cmp(op, x, y) \
- __builtin_choose_expr(__is_constexpr((x) - (y)), \
- __cmp(op, x, y), __careful_cmp_once(op, x, y))
+#define __careful_cmp(op, x, y) \
+ __careful_cmp_once(op, x, y, __UNIQUE_ID(x_), __UNIQUE_ID(y_))
#define __clamp(val, lo, hi) \
((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))
-#define __clamp_once(val, lo, hi, unique_val, unique_lo, unique_hi) ({ \
- typeof(val) unique_val = (val); \
- typeof(lo) unique_lo = (lo); \
- typeof(hi) unique_hi = (hi); \
+#define __clamp_once(val, lo, hi, uval, ulo, uhi) ({ \
+ __auto_type uval = (val); \
+ __auto_type ulo = (lo); \
+ __auto_type uhi = (hi); \
static_assert(__builtin_choose_expr(__is_constexpr((lo) > (hi)), \
(lo) <= (hi), true), \
"clamp() low limit " #lo " greater than high limit " #hi); \
- static_assert(__types_ok(val, lo), "clamp() 'lo' signedness error"); \
- static_assert(__types_ok(val, hi), "clamp() 'hi' signedness error"); \
- __clamp(unique_val, unique_lo, unique_hi); })
+ BUILD_BUG_ON_MSG(!__types_ok3(val,lo,hi,uval,ulo,uhi), \
+ "clamp("#val", "#lo", "#hi") signedness error"); \
+ __clamp(uval, ulo, uhi); })
-#define __careful_clamp(val, lo, hi) ({ \
- __builtin_choose_expr(__is_constexpr((val) - (lo) + (hi)), \
- __clamp(val, lo, hi), \
- __clamp_once(val, lo, hi, __UNIQUE_ID(__val), \
- __UNIQUE_ID(__lo), __UNIQUE_ID(__hi))); })
+#define __careful_clamp(val, lo, hi) \
+ __clamp_once(val, lo, hi, __UNIQUE_ID(v_), __UNIQUE_ID(l_), __UNIQUE_ID(h_))
/**
* min - return minimum of two values of the same or compatible types
@@ -111,13 +152,20 @@
#define umax(x, y) \
__careful_cmp(max, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull)
+#define __careful_op3(op, x, y, z, ux, uy, uz) ({ \
+ __auto_type ux = (x); __auto_type uy = (y);__auto_type uz = (z);\
+ BUILD_BUG_ON_MSG(!__types_ok3(x,y,z,ux,uy,uz), \
+ #op"3("#x", "#y", "#z") signedness error"); \
+ __cmp(op, ux, __cmp(op, uy, uz)); })
+
/**
* min3 - return minimum of three values
* @x: first value
* @y: second value
* @z: third value
*/
-#define min3(x, y, z) min((typeof(x))min(x, y), z)
+#define min3(x, y, z) \
+ __careful_op3(min, x, y, z, __UNIQUE_ID(x_), __UNIQUE_ID(y_), __UNIQUE_ID(z_))
/**
* max3 - return maximum of three values
@@ -125,7 +173,8 @@
* @y: second value
* @z: third value
*/
-#define max3(x, y, z) max((typeof(x))max(x, y), z)
+#define max3(x, y, z) \
+ __careful_op3(max, x, y, z, __UNIQUE_ID(x_), __UNIQUE_ID(y_), __UNIQUE_ID(z_))
/**
* min_not_zero - return the minimum that is _not_ zero, unless both are zero
@@ -277,6 +326,8 @@ static inline bool in_range32(u32 val, u32 start, u32 len)
* Use these carefully: no type checking, and uses the arguments
* multiple times. Use for obvious constants only.
*/
+#define MIN(a,b) __cmp(min,a,b)
+#define MAX(a,b) __cmp(max,a,b)
#define MIN_T(type,a,b) __cmp(min,(type)(a),(type)(b))
#define MAX_T(type,a,b) __cmp(max,(type)(a),(type)(b))
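The new scheme classifies each operand by which comparisons it is safe for: bit 0 for unsigned, bit 1 for signed, with statically non-negative signed values earning both. min()/max()/clamp() then AND the classes and fail the build if the result is zero. A userspace re-derivation of the idea (simplified; the kernel macros also thread the unique 'ux' copies through to avoid double evaluation):

#include <stdio.h>

#define is_signed_type(type)	(((type)(-1)) < (type)1)
#define statically_true(x)	(__builtin_constant_p(x) && (x))
#define is_nonneg(x)		statically_true((long long)(x) >= 0)
#define sign_use(x)						\
	(is_signed_type(__typeof__(x)) ? (2 + is_nonneg(x))	\
				       : (1 + 2 * (sizeof(x) < 4)))

int main(void)
{
	unsigned long ul = 5;
	int neg = -1;

	/* both operands share a usable class -> comparison allowed */
	printf("min(10, sizeof(int)) allowed: %d\n",
	       (sign_use(10) & sign_use(sizeof(int))) != 0);
	/* possibly-negative int vs unsigned long -> signedness error */
	printf("min(neg, ul)         allowed: %d\n",
	       (sign_use(neg) & sign_use(ul)) != 0);
	return 0;
}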
diff --git a/include/linux/profile.h b/include/linux/profile.h
index 2fb487f61d12..3f53cdb0c27c 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -10,7 +10,6 @@
#define CPU_PROFILING 1
#define SCHED_PROFILING 2
-#define SLEEP_PROFILING 3
#define KVM_PROFILING 4
struct proc_dir_entry;
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index ecc5cb7b8c91..4b16844c6bc2 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -10,6 +10,7 @@
#include <linux/mod_devicetable.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
+#include <linux/completion.h>
/**
* struct virtqueue - a queue to register buffers for sending or receiving.
@@ -109,6 +110,8 @@ struct virtio_admin_cmd {
__le64 group_member_id;
struct scatterlist *data_sg;
struct scatterlist *result_sg;
+ struct completion completion;
+ int ret;
};
/**
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index ab4b9a3fef6b..169c7d367fac 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -104,8 +104,6 @@ struct virtqueue_info {
* Returns 0 on success or error status
* If disable_vq_and_reset is set, then enable_vq_after_reset must also be
* set.
- * @create_avq: create admin virtqueue resource.
- * @destroy_avq: destroy admin virtqueue resource.
*/
struct virtio_config_ops {
void (*get)(struct virtio_device *vdev, unsigned offset,
@@ -133,8 +131,6 @@ struct virtio_config_ops {
struct virtio_shm_region *region, u8 id);
int (*disable_vq_and_reset)(struct virtqueue *vq);
int (*enable_vq_after_reset)(struct virtqueue *vq);
- int (*create_avq)(struct virtio_device *vdev);
- void (*destroy_avq)(struct virtio_device *vdev);
};
/* If driver didn't advertise the feature, it will never appear. */
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index d1d7825318c3..6c395a2600e8 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -56,7 +56,6 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
unsigned int thlen = 0;
unsigned int p_off = 0;
unsigned int ip_proto;
- u64 ret, remainder, gso_size;
if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
@@ -99,16 +98,6 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
u32 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
u32 needed = start + max_t(u32, thlen, off + sizeof(__sum16));
- if (hdr->gso_size) {
- gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size);
- ret = div64_u64_rem(skb->len, gso_size, &remainder);
- if (!(ret && (hdr->gso_size > needed) &&
- ((remainder > needed) || (remainder == 0)))) {
- return -EINVAL;
- }
- skb_shinfo(skb)->tx_flags |= SKBFL_SHARED_FRAG;
- }
-
if (!pskb_may_pull(skb, needed))
return -EINVAL;
@@ -182,6 +171,11 @@ retry:
if (gso_type != SKB_GSO_UDP_L4)
return -EINVAL;
break;
+ case SKB_GSO_TCPV4:
+ case SKB_GSO_TCPV6:
+ if (skb->csum_offset != offsetof(struct tcphdr, check))
+ return -EINVAL;
+ break;
}
/* Kernel has a special handling for GSO_BY_FRAGS. */
diff --git a/include/sound/ump_convert.h b/include/sound/ump_convert.h
index 28c364c63245..d099ae27f849 100644
--- a/include/sound/ump_convert.h
+++ b/include/sound/ump_convert.h
@@ -13,6 +13,7 @@ struct ump_cvt_to_ump_bank {
unsigned char cc_nrpn_msb, cc_nrpn_lsb;
unsigned char cc_data_msb, cc_data_lsb;
unsigned char cc_bank_msb, cc_bank_lsb;
+ bool cc_data_msb_set, cc_data_lsb_set;
};
/* context for converting from MIDI1 byte stream to UMP packet */
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 246c0fbd582e..0a523023bdcc 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -2383,6 +2383,14 @@ DEFINE_EVENT(btrfs__space_info_update, update_bytes_pinned,
TP_ARGS(fs_info, sinfo, old, diff)
);
+DEFINE_EVENT(btrfs__space_info_update, update_bytes_zone_unusable,
+
+ TP_PROTO(const struct btrfs_fs_info *fs_info,
+ const struct btrfs_space_info *sinfo, u64 old, s64 diff),
+
+ TP_ARGS(fs_info, sinfo, old, diff)
+);
+
DECLARE_EVENT_CLASS(btrfs_raid56_bio,
TP_PROTO(const struct btrfs_raid_bio *rbio,
diff --git a/include/trace/events/mptcp.h b/include/trace/events/mptcp.h
index 09e72215b9f9..085b749cdd97 100644
--- a/include/trace/events/mptcp.h
+++ b/include/trace/events/mptcp.h
@@ -34,7 +34,7 @@ TRACE_EVENT(mptcp_subflow_get_send,
struct sock *ssk;
__entry->active = mptcp_subflow_active(subflow);
- __entry->backup = subflow->backup;
+ __entry->backup = subflow->backup || subflow->request_bkup;
if (subflow->tcp_sock && sk_fullsock(subflow->tcp_sock))
__entry->free = sk_stream_memory_free(subflow->tcp_sock);
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 985a262d0f9e..5bf6148cac2b 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -841,11 +841,8 @@ __SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules)
#define __NR_mseal 462
__SYSCALL(__NR_mseal, sys_mseal)
-#define __NR_uretprobe 463
-__SYSCALL(__NR_uretprobe, sys_uretprobe)
-
#undef __NR_syscalls
-#define __NR_syscalls 464
+#define __NR_syscalls 463
/*
* 32 bit systems traditionally used different
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index 19619d4952a8..b6fbe4988f2e 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -517,7 +517,14 @@ struct drm_xe_query_gt_list {
* available per Dual Sub Slices (DSS). For example a query response
* containing the following in mask:
* ``EU_PER_DSS ff ff 00 00 00 00 00 00``
- * means each DSS has 16 EU.
+ * means each DSS has 16 SIMD8 EUs. This type may be omitted if the
+ * device doesn't have SIMD8 EUs.
+ * - %DRM_XE_TOPO_SIMD16_EU_PER_DSS - To query the mask of SIMD16 Execution
+ * Units (EU) available per Dual Sub Slices (DSS). For example a query
+ * response containing the following in mask:
+ * ``SIMD16_EU_PER_DSS ff ff 00 00 00 00 00 00``
+ * means each DSS has 16 SIMD16 EUs. This type may be omitted if the
+ * device doesn't have SIMD16 EUs.
*/
struct drm_xe_query_topology_mask {
/** @gt_id: GT ID the mask is associated with */
@@ -527,6 +534,7 @@ struct drm_xe_query_topology_mask {
#define DRM_XE_TOPO_DSS_COMPUTE 2
#define DRM_XE_TOPO_L3_BANK 3
#define DRM_XE_TOPO_EU_PER_DSS 4
+#define DRM_XE_TOPO_SIMD16_EU_PER_DSS 5
/** @type: type of mask */
__u16 type;
@@ -1590,10 +1598,10 @@ enum drm_xe_oa_property_id {
* b. Counter select c. Counter size and d. BC report. Also refer to the
* oa_formats array in drivers/gpu/drm/xe/xe_oa.c.
*/
-#define DRM_XE_OA_FORMAT_MASK_FMT_TYPE (0xff << 0)
-#define DRM_XE_OA_FORMAT_MASK_COUNTER_SEL (0xff << 8)
-#define DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE (0xff << 16)
-#define DRM_XE_OA_FORMAT_MASK_BC_REPORT (0xff << 24)
+#define DRM_XE_OA_FORMAT_MASK_FMT_TYPE (0xffu << 0)
+#define DRM_XE_OA_FORMAT_MASK_COUNTER_SEL (0xffu << 8)
+#define DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE (0xffu << 16)
+#define DRM_XE_OA_FORMAT_MASK_BC_REPORT (0xffu << 24)
/**
* @DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT: Requests periodic OA unit
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index 0e21f3998108..bf2c9cabd207 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -311,6 +311,7 @@ struct virtio_gpu_cmd_submit {
#define VIRTIO_GPU_CAPSET_VIRGL2 2
/* 3 is reserved for gfxstream */
#define VIRTIO_GPU_CAPSET_VENUS 4
+#define VIRTIO_GPU_CAPSET_DRM 6
/* VIRTIO_GPU_CMD_GET_CAPSET_INFO */
struct virtio_gpu_get_capset_info {
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index a43b14276bc3..cac0cdb9a916 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -1109,6 +1109,7 @@ struct ufs_hba {
bool ext_iid_sup;
bool scsi_host_added;
bool mcq_sup;
+ bool lsdb_sup;
bool mcq_enabled;
struct ufshcd_res_info res[RES_MAX];
void __iomem *mcq_base;
diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h
index 38fe97971a65..9917c7743d80 100644
--- a/include/ufs/ufshci.h
+++ b/include/ufs/ufshci.h
@@ -77,6 +77,7 @@ enum {
MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 0x02000000,
MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000,
MASK_CRYPTO_SUPPORT = 0x10000000,
+ MASK_LSDB_SUPPORT = 0x20000000,
MASK_MCQ_SUPPORT = 0x40000000,
};
diff --git a/init/Kconfig b/init/Kconfig
index a465ea9525bd..37260d17267e 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1902,6 +1902,7 @@ config RUST
depends on !MODVERSIONS
depends on !GCC_PLUGINS
depends on !RANDSTRUCT
+ depends on !SHADOW_CALL_STACK
depends on !DEBUG_INFO_BTF || PAHOLE_HAS_LANG_EXCLUDE
help
Enables Rust support in the kernel.
diff --git a/io_uring/napi.c b/io_uring/napi.c
index 4fd6bb331e1e..a3dc3762008f 100644
--- a/io_uring/napi.c
+++ b/io_uring/napi.c
@@ -205,7 +205,6 @@ void io_napi_init(struct io_ring_ctx *ctx)
void io_napi_free(struct io_ring_ctx *ctx)
{
struct io_napi_entry *e;
- LIST_HEAD(napi_list);
unsigned int i;
spin_lock(&ctx->napi_lock);
@@ -315,7 +314,6 @@ void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq)
*/
int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx)
{
- LIST_HEAD(napi_list);
bool is_stale = false;
if (!READ_ONCE(ctx->napi_busy_poll_dt))
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 0a8e02944689..1f63b60e85e7 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -347,6 +347,7 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
v &= IO_POLL_REF_MASK;
} while (atomic_sub_return(v, &req->poll_refs) & IO_POLL_REF_MASK);
+ io_napi_add(req);
return IOU_POLL_NO_ACTION;
}
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 4ad5ed8adf96..6dc76b590703 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -236,7 +236,7 @@ void static_key_disable_cpuslocked(struct static_key *key)
}
jump_label_lock();
- if (atomic_cmpxchg(&key->enabled, 1, 0))
+ if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
jump_label_update(key);
jump_label_unlock();
}
@@ -289,7 +289,7 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key)
return;
guard(mutex)(&jump_label_mutex);
- if (atomic_cmpxchg(&key->enabled, 1, 0))
+ if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
jump_label_update(key);
else
WARN_ON_ONCE(!static_key_slow_try_dec(key));
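The `== 1` comparison matters because atomic_cmpxchg() returns the previous counter value, not a success flag; a small illustration of the intended semantics (not from the patch, helper name is hypothetical):

	static bool key_disabled_by_us(struct static_key *key)
	{
		/*
		 * Only a 1 -> 0 transition means this caller actually disabled
		 * the key and must patch the code via jump_label_update(). If
		 * the old value was e.g. 2, the exchange did not happen, yet
		 * the previous "if (atomic_cmpxchg(...))" test still read true.
		 */
		return atomic_cmpxchg(&key->enabled, 1, 0) == 1;
	}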
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 07fb5987b42b..1bab21b4718f 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -92,7 +92,14 @@ static ssize_t profiling_store(struct kobject *kobj,
const char *buf, size_t count)
{
int ret;
+ static DEFINE_MUTEX(lock);
+ /*
+ * We need serialization here: profile_setup() initializes the prof_on
+ * value, and profile_init() must not reallocate prof_buffer once it
+ * has been allocated.
+ */
+ guard(mutex)(&lock);
if (prof_on)
return -EEXIST;
/*
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index f5a36e67b593..ac2e22502741 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -357,7 +357,7 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
{
struct pv_node *pn = (struct pv_node *)node;
- enum vcpu_state old = vcpu_halted;
+ u8 old = vcpu_halted;
/*
* If the vCPU is indeed halted, advance its state to match that of
* pv_wait_node(). If OTOH this fails, the vCPU was running and will
diff --git a/kernel/panic.c b/kernel/panic.c
index f861bedc1925..32b1c1c93b7a 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -376,7 +376,7 @@ void panic(const char *fmt, ...)
panic_print_sys_info(false);
- kmsg_dump(KMSG_DUMP_PANIC);
+ kmsg_dump_desc(KMSG_DUMP_PANIC, buf);
/*
* If you doubt kdump always works fine in any situation,
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 054c0e7784fd..041a0f76688d 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -4184,16 +4184,21 @@ const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason)
EXPORT_SYMBOL_GPL(kmsg_dump_reason_str);
/**
- * kmsg_dump - dump kernel log to kernel message dumpers.
+ * kmsg_dump_desc - dump kernel log to kernel message dumpers.
* @reason: the reason (oops, panic etc) for dumping
+ * @desc: a short string to describe what caused the panic or oops. Can be NULL
+ * if no additional description is available.
*
* Call each of the registered dumper's dump() callback, which can
* retrieve the kmsg records with kmsg_dump_get_line() or
* kmsg_dump_get_buffer().
*/
-void kmsg_dump(enum kmsg_dump_reason reason)
+void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc)
{
struct kmsg_dumper *dumper;
+ struct kmsg_dump_detail detail = {
+ .reason = reason,
+ .description = desc};
rcu_read_lock();
list_for_each_entry_rcu(dumper, &dump_list, list) {
@@ -4211,7 +4216,7 @@ void kmsg_dump(enum kmsg_dump_reason reason)
continue;
/* invoke dumper which will iterate over records */
- dumper->dump(dumper, reason);
+ dumper->dump(dumper, &detail);
}
rcu_read_unlock();
}
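A minimal dumper sketch against the updated callback signature (my_dump/my_dumper are hypothetical names; struct kmsg_dump_detail carries the .reason and .description members shown above):

	static void my_dump(struct kmsg_dumper *dumper,
			    struct kmsg_dump_detail *detail)
	{
		if (detail->reason != KMSG_DUMP_PANIC)
			return;
		if (detail->description)
			pr_emerg("panic: %s\n", detail->description);
		/* followed by kmsg_dump_get_line()/kmsg_dump_get_buffer() as before */
	}

	static struct kmsg_dumper my_dumper = {
		.dump = my_dump,
	};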
diff --git a/kernel/profile.c b/kernel/profile.c
index 2b775cc5c28f..1fcf1adcf4eb 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -47,30 +47,14 @@ static unsigned short int prof_shift;
int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);
-static cpumask_var_t prof_cpu_mask;
-#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
-static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
-static DEFINE_PER_CPU(int, cpu_profile_flip);
-static DEFINE_MUTEX(profile_flip_mutex);
-#endif /* CONFIG_SMP */
-
int profile_setup(char *str)
{
static const char schedstr[] = "schedule";
- static const char sleepstr[] = "sleep";
static const char kvmstr[] = "kvm";
const char *select = NULL;
int par;
- if (!strncmp(str, sleepstr, strlen(sleepstr))) {
-#ifdef CONFIG_SCHEDSTATS
- force_schedstat_enabled();
- prof_on = SLEEP_PROFILING;
- select = sleepstr;
-#else
- pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
-#endif /* CONFIG_SCHEDSTATS */
- } else if (!strncmp(str, schedstr, strlen(schedstr))) {
+ if (!strncmp(str, schedstr, strlen(schedstr))) {
prof_on = SCHED_PROFILING;
select = schedstr;
} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
@@ -114,11 +98,6 @@ int __ref profile_init(void)
buffer_bytes = prof_len*sizeof(atomic_t);
- if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
- return -ENOMEM;
-
- cpumask_copy(prof_cpu_mask, cpu_possible_mask);
-
prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
if (prof_buffer)
return 0;
@@ -132,195 +111,16 @@ int __ref profile_init(void)
if (prof_buffer)
return 0;
- free_cpumask_var(prof_cpu_mask);
return -ENOMEM;
}
-#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
-/*
- * Each cpu has a pair of open-addressed hashtables for pending
- * profile hits. read_profile() IPI's all cpus to request them
- * to flip buffers and flushes their contents to prof_buffer itself.
- * Flip requests are serialized by the profile_flip_mutex. The sole
- * use of having a second hashtable is for avoiding cacheline
- * contention that would otherwise happen during flushes of pending
- * profile hits required for the accuracy of reported profile hits
- * and so resurrect the interrupt livelock issue.
- *
- * The open-addressed hashtables are indexed by profile buffer slot
- * and hold the number of pending hits to that profile buffer slot on
- * a cpu in an entry. When the hashtable overflows, all pending hits
- * are accounted to their corresponding profile buffer slots with
- * atomic_add() and the hashtable emptied. As numerous pending hits
- * may be accounted to a profile buffer slot in a hashtable entry,
- * this amortizes a number of atomic profile buffer increments likely
- * to be far larger than the number of entries in the hashtable,
- * particularly given that the number of distinct profile buffer
- * positions to which hits are accounted during short intervals (e.g.
- * several seconds) is usually very small. Exclusion from buffer
- * flipping is provided by interrupt disablement (note that for
- * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from
- * process context).
- * The hash function is meant to be lightweight as opposed to strong,
- * and was vaguely inspired by ppc64 firmware-supported inverted
- * pagetable hash functions, but uses a full hashtable full of finite
- * collision chains, not just pairs of them.
- *
- * -- nyc
- */
-static void __profile_flip_buffers(void *unused)
-{
- int cpu = smp_processor_id();
-
- per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
-}
-
-static void profile_flip_buffers(void)
-{
- int i, j, cpu;
-
- mutex_lock(&profile_flip_mutex);
- j = per_cpu(cpu_profile_flip, get_cpu());
- put_cpu();
- on_each_cpu(__profile_flip_buffers, NULL, 1);
- for_each_online_cpu(cpu) {
- struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
- for (i = 0; i < NR_PROFILE_HIT; ++i) {
- if (!hits[i].hits) {
- if (hits[i].pc)
- hits[i].pc = 0;
- continue;
- }
- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
- hits[i].hits = hits[i].pc = 0;
- }
- }
- mutex_unlock(&profile_flip_mutex);
-}
-
-static void profile_discard_flip_buffers(void)
-{
- int i, cpu;
-
- mutex_lock(&profile_flip_mutex);
- i = per_cpu(cpu_profile_flip, get_cpu());
- put_cpu();
- on_each_cpu(__profile_flip_buffers, NULL, 1);
- for_each_online_cpu(cpu) {
- struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
- memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
- }
- mutex_unlock(&profile_flip_mutex);
-}
-
-static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
-{
- unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
- int i, j, cpu;
- struct profile_hit *hits;
-
- pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
- i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
- secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
- cpu = get_cpu();
- hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
- if (!hits) {
- put_cpu();
- return;
- }
- /*
- * We buffer the global profiler buffer into a per-CPU
- * queue and thus reduce the number of global (and possibly
- * NUMA-alien) accesses. The write-queue is self-coalescing:
- */
- local_irq_save(flags);
- do {
- for (j = 0; j < PROFILE_GRPSZ; ++j) {
- if (hits[i + j].pc == pc) {
- hits[i + j].hits += nr_hits;
- goto out;
- } else if (!hits[i + j].hits) {
- hits[i + j].pc = pc;
- hits[i + j].hits = nr_hits;
- goto out;
- }
- }
- i = (i + secondary) & (NR_PROFILE_HIT - 1);
- } while (i != primary);
-
- /*
- * Add the current hit(s) and flush the write-queue out
- * to the global buffer:
- */
- atomic_add(nr_hits, &prof_buffer[pc]);
- for (i = 0; i < NR_PROFILE_HIT; ++i) {
- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
- hits[i].pc = hits[i].hits = 0;
- }
-out:
- local_irq_restore(flags);
- put_cpu();
-}
-
-static int profile_dead_cpu(unsigned int cpu)
-{
- struct page *page;
- int i;
-
- if (cpumask_available(prof_cpu_mask))
- cpumask_clear_cpu(cpu, prof_cpu_mask);
-
- for (i = 0; i < 2; i++) {
- if (per_cpu(cpu_profile_hits, cpu)[i]) {
- page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]);
- per_cpu(cpu_profile_hits, cpu)[i] = NULL;
- __free_page(page);
- }
- }
- return 0;
-}
-
-static int profile_prepare_cpu(unsigned int cpu)
-{
- int i, node = cpu_to_mem(cpu);
- struct page *page;
-
- per_cpu(cpu_profile_flip, cpu) = 0;
-
- for (i = 0; i < 2; i++) {
- if (per_cpu(cpu_profile_hits, cpu)[i])
- continue;
-
- page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
- if (!page) {
- profile_dead_cpu(cpu);
- return -ENOMEM;
- }
- per_cpu(cpu_profile_hits, cpu)[i] = page_address(page);
-
- }
- return 0;
-}
-
-static int profile_online_cpu(unsigned int cpu)
-{
- if (cpumask_available(prof_cpu_mask))
- cpumask_set_cpu(cpu, prof_cpu_mask);
-
- return 0;
-}
-
-#else /* !CONFIG_SMP */
-#define profile_flip_buffers() do { } while (0)
-#define profile_discard_flip_buffers() do { } while (0)
-
static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
unsigned long pc;
pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
+ if (pc < prof_len)
+ atomic_add(nr_hits, &prof_buffer[pc]);
}
-#endif /* !CONFIG_SMP */
void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
@@ -334,8 +134,8 @@ void profile_tick(int type)
{
struct pt_regs *regs = get_irq_regs();
- if (!user_mode(regs) && cpumask_available(prof_cpu_mask) &&
- cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
+ /* This is the old kernel-only legacy profiling */
+ if (!user_mode(regs))
profile_hit(type, (void *)profile_pc(regs));
}
@@ -358,7 +158,6 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
char *pnt;
unsigned long sample_step = 1UL << prof_shift;
- profile_flip_buffers();
if (p >= (prof_len+1)*sizeof(unsigned int))
return 0;
if (count > (prof_len+1)*sizeof(unsigned int) - p)
@@ -404,7 +203,6 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
return -EINVAL;
}
#endif
- profile_discard_flip_buffers();
memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
return count;
}
@@ -418,40 +216,14 @@ static const struct proc_ops profile_proc_ops = {
int __ref create_proc_profile(void)
{
struct proc_dir_entry *entry;
-#ifdef CONFIG_SMP
- enum cpuhp_state online_state;
-#endif
-
int err = 0;
if (!prof_on)
return 0;
-#ifdef CONFIG_SMP
- err = cpuhp_setup_state(CPUHP_PROFILE_PREPARE, "PROFILE_PREPARE",
- profile_prepare_cpu, profile_dead_cpu);
- if (err)
- return err;
-
- err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_PROFILE_ONLINE",
- profile_online_cpu, NULL);
- if (err < 0)
- goto err_state_prep;
- online_state = err;
- err = 0;
-#endif
entry = proc_create("profile", S_IWUSR | S_IRUGO,
NULL, &profile_proc_ops);
- if (!entry)
- goto err_state_onl;
- proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));
-
- return err;
-err_state_onl:
-#ifdef CONFIG_SMP
- cpuhp_remove_state(online_state);
-err_state_prep:
- cpuhp_remove_state(CPUHP_PROFILE_PREPARE);
-#endif
+ if (entry)
+ proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));
return err;
}
subsys_initcall(create_proc_profile);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a9f655025607..f3951e4a55e5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7845,6 +7845,30 @@ void set_rq_offline(struct rq *rq)
}
}
+static inline void sched_set_rq_online(struct rq *rq, int cpu)
+{
+ struct rq_flags rf;
+
+ rq_lock_irqsave(rq, &rf);
+ if (rq->rd) {
+ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+ set_rq_online(rq);
+ }
+ rq_unlock_irqrestore(rq, &rf);
+}
+
+static inline void sched_set_rq_offline(struct rq *rq, int cpu)
+{
+ struct rq_flags rf;
+
+ rq_lock_irqsave(rq, &rf);
+ if (rq->rd) {
+ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+ set_rq_offline(rq);
+ }
+ rq_unlock_irqrestore(rq, &rf);
+}
+
/*
* used to mark begin/end of suspend/resume:
*/
@@ -7895,10 +7919,25 @@ static int cpuset_cpu_inactive(unsigned int cpu)
return 0;
}
+static inline void sched_smt_present_inc(int cpu)
+{
+#ifdef CONFIG_SCHED_SMT
+ if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+ static_branch_inc_cpuslocked(&sched_smt_present);
+#endif
+}
+
+static inline void sched_smt_present_dec(int cpu)
+{
+#ifdef CONFIG_SCHED_SMT
+ if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+ static_branch_dec_cpuslocked(&sched_smt_present);
+#endif
+}
+
int sched_cpu_activate(unsigned int cpu)
{
struct rq *rq = cpu_rq(cpu);
- struct rq_flags rf;
/*
* Clear the balance_push callback and prepare to schedule
@@ -7906,13 +7945,10 @@ int sched_cpu_activate(unsigned int cpu)
*/
balance_push_set(cpu, false);
-#ifdef CONFIG_SCHED_SMT
/*
* When going up, increment the number of cores with SMT present.
*/
- if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
- static_branch_inc_cpuslocked(&sched_smt_present);
-#endif
+ sched_smt_present_inc(cpu);
set_cpu_active(cpu, true);
if (sched_smp_initialized) {
@@ -7930,12 +7966,7 @@ int sched_cpu_activate(unsigned int cpu)
* 2) At runtime, if cpuset_cpu_active() fails to rebuild the
* domains.
*/
- rq_lock_irqsave(rq, &rf);
- if (rq->rd) {
- BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
- set_rq_online(rq);
- }
- rq_unlock_irqrestore(rq, &rf);
+ sched_set_rq_online(rq, cpu);
return 0;
}
@@ -7943,7 +7974,6 @@ int sched_cpu_activate(unsigned int cpu)
int sched_cpu_deactivate(unsigned int cpu)
{
struct rq *rq = cpu_rq(cpu);
- struct rq_flags rf;
int ret;
/*
@@ -7974,20 +8004,14 @@ int sched_cpu_deactivate(unsigned int cpu)
*/
synchronize_rcu();
- rq_lock_irqsave(rq, &rf);
- if (rq->rd) {
- BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
- set_rq_offline(rq);
- }
- rq_unlock_irqrestore(rq, &rf);
+ sched_set_rq_offline(rq, cpu);
-#ifdef CONFIG_SCHED_SMT
/*
* When going down, decrement the number of cores with SMT present.
*/
- if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
- static_branch_dec_cpuslocked(&sched_smt_present);
+ sched_smt_present_dec(cpu);
+#ifdef CONFIG_SCHED_SMT
sched_core_cpu_deactivate(cpu);
#endif
@@ -7997,6 +8021,8 @@ int sched_cpu_deactivate(unsigned int cpu)
sched_update_numa(cpu, false);
ret = cpuset_cpu_inactive(cpu);
if (ret) {
+ sched_smt_present_inc(cpu);
+ sched_set_rq_online(rq, cpu);
balance_push_set(cpu, false);
set_cpu_active(cpu, true);
sched_update_numa(cpu, true);
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index a5e00293ae43..0bed0fa1acd9 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -582,6 +582,12 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
}
stime = mul_u64_u64_div_u64(stime, rtime, stime + utime);
+ /*
+ * Because mul_u64_u64_div_u64() can approximate on some
+ * architectures, enforce the constraint that: a*b/(b+c) <= a.
+ */
+ if (unlikely(stime > rtime))
+ stime = rtime;
update:
/*
diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
index 78e48f5426ee..eb0cdcd4d921 100644
--- a/kernel/sched/stats.c
+++ b/kernel/sched/stats.c
@@ -92,16 +92,6 @@ void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
trace_sched_stat_blocked(p, delta);
- /*
- * Blocking time is in units of nanosecs, so shift by
- * 20 to get a milliseconds-range estimation of the
- * amount of time that the task spent sleeping:
- */
- if (unlikely(prof_on == SLEEP_PROFILING)) {
- profile_hits(SLEEP_PROFILING,
- (void *)get_wchan(p),
- delta >> 20);
- }
account_scheduler_latency(p, delta >> 10, 0);
}
}
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 5c2daa7ad3f9..5d14d639ac71 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -6,12 +6,14 @@
static struct callback_head work_exited; /* all we need is ->next == NULL */
+#ifdef CONFIG_IRQ_WORK
static void task_work_set_notify_irq(struct irq_work *entry)
{
test_and_set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
}
static DEFINE_PER_CPU(struct irq_work, irq_work_NMI_resume) =
IRQ_WORK_INIT_HARD(task_work_set_notify_irq);
+#endif
/**
* task_work_add - ask the @task to execute @work->func()
@@ -57,6 +59,8 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
if (notify == TWA_NMI_CURRENT) {
if (WARN_ON_ONCE(task != current))
return -EINVAL;
+ if (!IS_ENABLED(CONFIG_IRQ_WORK))
+ return -EINVAL;
} else {
/* record the work call stack in order to print it in KASAN reports */
kasan_record_aux_stack(work);
@@ -81,9 +85,11 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
case TWA_SIGNAL_NO_IPI:
__set_notify_signal(task);
break;
+#ifdef CONFIG_IRQ_WORK
case TWA_NMI_CURRENT:
irq_work_queue(this_cpu_ptr(&irq_work_NMI_resume));
break;
+#endif
default:
WARN_ON_ONCE(1);
break;
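A caller-side sketch under the guard above (work item and handler names are hypothetical): on kernels built without CONFIG_IRQ_WORK, TWA_NMI_CURRENT now fails with -EINVAL instead of queueing an irq_work that was never defined, so callers should check the return value.

	static void nmi_work_fn(struct callback_head *head)
	{
		/* runs on return to user mode, outside NMI context */
	}

	static struct callback_head nmi_work;

	static void queue_work_from_nmi(void)
	{
		init_task_work(&nmi_work, nmi_work_fn);
		if (task_work_add(current, &nmi_work, TWA_NMI_CURRENT))
			return;	/* e.g. CONFIG_IRQ_WORK=n, or task != current */
	}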
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index d25ba49e313c..d0538a75f4c6 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -246,7 +246,7 @@ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow,
wd_delay = cycles_to_nsec_safe(watchdog, *wdnow, wd_end);
if (wd_delay <= WATCHDOG_MAX_SKEW) {
- if (nretries > 1 || nretries >= max_retries) {
+ if (nretries > 1 && nretries >= max_retries) {
pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
smp_processor_id(), watchdog->name, nretries);
}
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index b4843099a8da..ed58eebb4e8f 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -1141,7 +1141,6 @@ void tick_broadcast_switch_to_oneshot(void)
#ifdef CONFIG_HOTPLUG_CPU
void hotplug_cpu__broadcast_tick_pull(int deadcpu)
{
- struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
struct clock_event_device *bc;
unsigned long flags;
@@ -1167,6 +1166,8 @@ void hotplug_cpu__broadcast_tick_pull(int deadcpu)
* device to avoid the starvation.
*/
if (tick_check_broadcast_expired()) {
+ struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
+
cpumask_clear_cpu(smp_processor_id(), tick_broadcast_force_mask);
tick_program_event(td->evtdev->next_event, 1);
}
diff --git a/kernel/trace/preemptirq_delay_test.c b/kernel/trace/preemptirq_delay_test.c
index cb0871fbdb07..314ffc143039 100644
--- a/kernel/trace/preemptirq_delay_test.c
+++ b/kernel/trace/preemptirq_delay_test.c
@@ -34,8 +34,6 @@ MODULE_PARM_DESC(cpu_affinity, "Cpu num test is running on");
static struct completion done;
-#define MIN(x, y) ((x) < (y) ? (x) : (y))
-
static void busy_wait(ulong time)
{
u64 start, end;
diff --git a/lib/btree.c b/lib/btree.c
index 49420cae3a83..bb81d3393ac5 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -43,7 +43,6 @@
#include <linux/slab.h>
#include <linux/module.h>
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define NODESIZE MAX(L1_CACHE_BYTES, 128)
struct btree_geo {
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
index 20a858031f12..9d34d35908da 100644
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -37,7 +37,9 @@
#include <linux/decompress/mm.h>
+#ifndef MIN
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif
static long long INIT read_int(unsigned char *ptr, int size)
{
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index cdd4e2314bfc..2d71b1115916 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1080,7 +1080,7 @@ char *resource_string(char *buf, char *end, struct resource *res,
#define FLAG_BUF_SIZE (2 * sizeof(res->flags))
#define DECODED_BUF_SIZE sizeof("[mem - 64bit pref window disabled]")
#define RAW_BUF_SIZE sizeof("[mem - flags 0x]")
- char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE,
+ char sym[MAX(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE,
2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)];
char *p = sym, *pend = sym + sizeof(sym);
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 5d6581ab7c07..2d3163e4da96 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -120,8 +120,6 @@
#define CLASS_BITS 8
#define MAGIC_VAL_BITS 8
-#define MAX(a, b) ((a) >= (b) ? (a) : (b))
-
#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(CONFIG_ZSMALLOC_CHAIN_SIZE, UL))
/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 8a4ebd93adfc..06da8ac13dca 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -119,13 +119,6 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state)
case DISCOVERY_STARTING:
break;
case DISCOVERY_FINDING:
- /* If discovery was not started then it was initiated by the
- * MGMT interface so no MGMT event shall be generated either
- */
- if (old_state != DISCOVERY_STARTING) {
- hdev->discovery.state = old_state;
- return;
- }
mgmt_discovering(hdev, 1);
break;
case DISCOVERY_RESOLVING:
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index dce8035ca799..d0c118c47f6c 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1721,9 +1721,10 @@ static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
switch (enable) {
case LE_SCAN_ENABLE:
hci_dev_set_flag(hdev, HCI_LE_SCAN);
- if (hdev->le_scan_type == LE_SCAN_ACTIVE)
+ if (hdev->le_scan_type == LE_SCAN_ACTIVE) {
clear_pending_adv_report(hdev);
- hci_discovery_set_state(hdev, DISCOVERY_FINDING);
+ hci_discovery_set_state(hdev, DISCOVERY_FINDING);
+ }
break;
case LE_SCAN_DISABLE:
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index cd2ed16da8a4..a31d39a821f4 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -2976,6 +2976,27 @@ static int hci_passive_scan_sync(struct hci_dev *hdev)
*/
filter_policy = hci_update_accept_list_sync(hdev);
+ /* If suspended and filter_policy is set to 0x00 (no accept list), then
+ * passive scanning cannot be started since that would require the host
+ * to be woken up to process the reports.
+ */
+ if (hdev->suspended && !filter_policy) {
+ /* If the accept list is empty, there is no need to scan while
+ * suspended.
+ */
+ if (list_empty(&hdev->le_accept_list))
+ return 0;
+
+ /* If there are devices in the accept_list, it means some devices
+ * could not be programmed, which in the non-suspended case means
+ * filter_policy needs to be set to 0x00 so the host does the
+ * filtering. Since we are handling the suspended case here, we can
+ * ignore the devices that need host filtering and allow the devices
+ * in the accept_list to wake up the system.
+ */
+ filter_policy = 0x01;
+ }
+
/* When the controller is using random resolvable addresses and
* with that having LE privacy enabled, then controllers with
* Extended Scanner Filter Policies support can now enable support
diff --git a/net/core/dev.c b/net/core/dev.c
index 6ea1d20676fb..751d9b70e6ad 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5150,6 +5150,7 @@ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb)
bpf_net_ctx_clear(bpf_net_ctx);
return XDP_DROP;
}
+ bpf_net_ctx_clear(bpf_net_ctx);
}
return XDP_PASS;
out_redir:
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 87e67194f240..73fd7f543fd0 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3288,7 +3288,7 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
- dev = rtnl_dev_get(net, tb);
+ dev = rtnl_dev_get(tgt_net, tb);
else if (tb[IFLA_GROUP])
err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
else
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 983fee76f5cf..8ca13208d240 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -1331,13 +1331,13 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
u32 rss_cfg_offset = offsetof(struct ethtool_rxfh, rss_config[0]);
const struct ethtool_ops *ops = dev->ethtool_ops;
u32 dev_indir_size = 0, dev_key_size = 0, i;
+ u32 user_indir_len = 0, indir_bytes = 0;
struct ethtool_rxfh_param rxfh_dev = {};
struct ethtool_rxfh_context *ctx = NULL;
struct netlink_ext_ack *extack = NULL;
struct ethtool_rxnfc rx_rings;
struct ethtool_rxfh rxfh;
bool locked = false; /* dev->ethtool->rss_lock taken */
- u32 indir_bytes = 0;
bool create = false;
u8 *rss_config;
int ret;
@@ -1382,10 +1382,9 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
rxfh.input_xfrm == RXH_XFRM_NO_CHANGE))
return -EINVAL;
- if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE)
- indir_bytes = dev_indir_size * sizeof(rxfh_dev.indir[0]);
+ indir_bytes = dev_indir_size * sizeof(rxfh_dev.indir[0]);
- rss_config = kzalloc(indir_bytes + rxfh.key_size, GFP_USER);
+ rss_config = kzalloc(indir_bytes + dev_key_size, GFP_USER);
if (!rss_config)
return -ENOMEM;
@@ -1400,6 +1399,7 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
*/
if (rxfh.indir_size &&
rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) {
+ user_indir_len = indir_bytes;
rxfh_dev.indir = (u32 *)rss_config;
rxfh_dev.indir_size = dev_indir_size;
ret = ethtool_copy_validate_indir(rxfh_dev.indir,
@@ -1426,7 +1426,7 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
rxfh_dev.key_size = dev_key_size;
rxfh_dev.key = rss_config + indir_bytes;
if (copy_from_user(rxfh_dev.key,
- useraddr + rss_cfg_offset + indir_bytes,
+ useraddr + rss_cfg_offset + user_indir_len,
rxfh.key_size)) {
ret = -EFAULT;
goto out;
@@ -1474,16 +1474,21 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
rxfh_dev.input_xfrm = rxfh.input_xfrm;
if (rxfh.rss_context && ops->create_rxfh_context) {
- if (create)
+ if (create) {
ret = ops->create_rxfh_context(dev, ctx, &rxfh_dev,
extack);
- else if (rxfh_dev.rss_delete)
+ /* Make sure driver populates defaults */
+ WARN_ON_ONCE(!ret && !rxfh_dev.key &&
+ !memchr_inv(ethtool_rxfh_context_key(ctx),
+ 0, ctx->key_size));
+ } else if (rxfh_dev.rss_delete) {
ret = ops->remove_rxfh_context(dev, ctx,
rxfh.rss_context,
extack);
- else
+ } else {
ret = ops->modify_rxfh_context(dev, ctx, &rxfh_dev,
extack);
+ }
} else {
ret = ops->set_rxfh(dev, &rxfh_dev, extack);
}
@@ -1522,6 +1527,22 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
kfree(ctx);
goto out;
}
+
+ /* Fetch the defaults for the old API; in the new API, drivers
+ * should write the defaults into ctx themselves.
+ */
+ rxfh_dev.indir = (u32 *)rss_config;
+ rxfh_dev.indir_size = dev_indir_size;
+
+ rxfh_dev.key = rss_config + indir_bytes;
+ rxfh_dev.key_size = dev_key_size;
+
+ ret = ops->get_rxfh(dev, &rxfh_dev);
+ if (WARN_ON(ret)) {
+ xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context);
+ kfree(ctx);
+ goto out;
+ }
}
if (rxfh_dev.rss_delete) {
WARN_ON(xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context) != ctx);
@@ -1530,12 +1551,14 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
if (rxfh_dev.indir) {
for (i = 0; i < dev_indir_size; i++)
ethtool_rxfh_context_indir(ctx)[i] = rxfh_dev.indir[i];
- ctx->indir_configured = 1;
+ ctx->indir_configured =
+ rxfh.indir_size &&
+ rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE;
}
if (rxfh_dev.key) {
memcpy(ethtool_rxfh_context_key(ctx), rxfh_dev.key,
dev_key_size);
- ctx->key_configured = 1;
+ ctx->key_configured = !!rxfh.key_size;
}
if (rxfh_dev.hfunc != ETH_RSS_HASH_NO_CHANGE)
ctx->hfunc = rxfh_dev.hfunc;
diff --git a/net/ethtool/rss.c b/net/ethtool/rss.c
index 71679137eff2..5c4c4505ab9a 100644
--- a/net/ethtool/rss.c
+++ b/net/ethtool/rss.c
@@ -111,7 +111,8 @@ rss_reply_size(const struct ethnl_req_info *req_base,
const struct rss_reply_data *data = RSS_REPDATA(reply_base);
int len;
- len = nla_total_size(sizeof(u32)) + /* _RSS_HFUNC */
+ len = nla_total_size(sizeof(u32)) + /* _RSS_CONTEXT */
+ nla_total_size(sizeof(u32)) + /* _RSS_HFUNC */
nla_total_size(sizeof(u32)) + /* _RSS_INPUT_XFRM */
nla_total_size(sizeof(u32) * data->indir_size) + /* _RSS_INDIR */
nla_total_size(data->hkey_size); /* _RSS_HKEY */
@@ -124,6 +125,11 @@ rss_fill_reply(struct sk_buff *skb, const struct ethnl_req_info *req_base,
const struct ethnl_reply_data *reply_base)
{
const struct rss_reply_data *data = RSS_REPDATA(reply_base);
+ struct rss_req_info *request = RSS_REQINFO(req_base);
+
+ if (request->rss_context &&
+ nla_put_u32(skb, ETHTOOL_A_RSS_CONTEXT, request->rss_context))
+ return -EMSGSIZE;
if ((data->hfunc &&
nla_put_u32(skb, ETHTOOL_A_RSS_HFUNC, data->hfunc)) ||
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index 4d42d0756fd7..a5db7c67d61b 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -145,25 +145,27 @@ static struct pernet_operations iptable_nat_net_ops = {
static int __init iptable_nat_init(void)
{
- int ret = xt_register_template(&nf_nat_ipv4_table,
- iptable_nat_table_init);
+ int ret;
+ /* net->gen->ptr[iptable_nat_net_id] must be allocated
+ * before calling iptable_nat_table_init().
+ */
+ ret = register_pernet_subsys(&iptable_nat_net_ops);
if (ret < 0)
return ret;
- ret = register_pernet_subsys(&iptable_nat_net_ops);
- if (ret < 0) {
- xt_unregister_template(&nf_nat_ipv4_table);
- return ret;
- }
+ ret = xt_register_template(&nf_nat_ipv4_table,
+ iptable_nat_table_init);
+ if (ret < 0)
+ unregister_pernet_subsys(&iptable_nat_net_ops);
return ret;
}
static void __exit iptable_nat_exit(void)
{
- unregister_pernet_subsys(&iptable_nat_net_ops);
xt_unregister_template(&nf_nat_ipv4_table);
+ unregister_pernet_subsys(&iptable_nat_net_ops);
}
module_init(iptable_nat_init);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 454362e359da..e2b9583ed96a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -754,8 +754,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
* <prev RTT . ><current RTT .. ><next RTT .... >
*/
- if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&
- !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
+ if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf)) {
u64 rcvwin, grow;
int rcvbuf;
@@ -771,12 +770,22 @@ void tcp_rcv_space_adjust(struct sock *sk)
rcvbuf = min_t(u64, tcp_space_from_win(sk, rcvwin),
READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
- if (rcvbuf > sk->sk_rcvbuf) {
- WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
+ if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
+ if (rcvbuf > sk->sk_rcvbuf) {
+ WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
- /* Make the window clamp follow along. */
- WRITE_ONCE(tp->window_clamp,
- tcp_win_from_space(sk, rcvbuf));
+ /* Make the window clamp follow along. */
+ WRITE_ONCE(tp->window_clamp,
+ tcp_win_from_space(sk, rcvbuf));
+ }
+ } else {
+ /* Make the window clamp follow along while being bounded
+ * by SO_RCVBUF.
+ */
+ int clamp = tcp_win_from_space(sk, min(rcvbuf, sk->sk_rcvbuf));
+
+ if (clamp > tp->window_clamp)
+ WRITE_ONCE(tp->window_clamp, clamp);
}
}
tp->rcvq_space.space = copied;
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 4b791e74529e..e4ad3311e148 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -140,6 +140,9 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
if (thlen < sizeof(*th))
goto out;
+ if (unlikely(skb_checksum_start(skb) != skb_transport_header(skb)))
+ goto out;
+
if (!pskb_may_pull(skb, thlen))
goto out;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index aa2e0a28ca61..bc8a9da750fe 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -278,6 +278,10 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
if (gso_skb->len <= sizeof(*uh) + mss)
return ERR_PTR(-EINVAL);
+ if (unlikely(skb_checksum_start(gso_skb) !=
+ skb_transport_header(gso_skb)))
+ return ERR_PTR(-EINVAL);
+
if (skb_gso_ok(gso_skb, features | NETIF_F_GSO_ROBUST)) {
/* Packet is from an untrusted source, reset gso_segs. */
skb_shinfo(gso_skb)->gso_segs = DIV_ROUND_UP(gso_skb->len - sizeof(*uh),
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 70a0b2ad6bd7..b8eec1b6cc2c 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -227,6 +227,7 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
return NULL;
memset(ndopts, 0, sizeof(*ndopts));
while (opt_len) {
+ bool unknown = false;
int l;
if (opt_len < sizeof(struct nd_opt_hdr))
return NULL;
@@ -262,22 +263,23 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
break;
#endif
default:
- if (ndisc_is_useropt(dev, nd_opt)) {
- ndopts->nd_useropts_end = nd_opt;
- if (!ndopts->nd_useropts)
- ndopts->nd_useropts = nd_opt;
- } else {
- /*
- * Unknown options must be silently ignored,
- * to accommodate future extension to the
- * protocol.
- */
- ND_PRINTK(2, notice,
- "%s: ignored unsupported option; type=%d, len=%d\n",
- __func__,
- nd_opt->nd_opt_type,
- nd_opt->nd_opt_len);
- }
+ unknown = true;
+ }
+ if (ndisc_is_useropt(dev, nd_opt)) {
+ ndopts->nd_useropts_end = nd_opt;
+ if (!ndopts->nd_useropts)
+ ndopts->nd_useropts = nd_opt;
+ } else if (unknown) {
+ /*
+ * Unknown options must be silently ignored,
+ * to accommodate future extension to the
+ * protocol.
+ */
+ ND_PRINTK(2, notice,
+ "%s: ignored unsupported option; type=%d, len=%d\n",
+ __func__,
+ nd_opt->nd_opt_type,
+ nd_opt->nd_opt_len);
}
next_opt:
opt_len -= l;
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index 52cf104e3478..e119d4f090cc 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -147,23 +147,27 @@ static struct pernet_operations ip6table_nat_net_ops = {
static int __init ip6table_nat_init(void)
{
- int ret = xt_register_template(&nf_nat_ipv6_table,
- ip6table_nat_table_init);
+ int ret;
+ /* net->gen->ptr[ip6table_nat_net_id] must be allocated
+ * before calling ip6table_nat_table_init().
+ */
+ ret = register_pernet_subsys(&ip6table_nat_net_ops);
if (ret < 0)
return ret;
- ret = register_pernet_subsys(&ip6table_nat_net_ops);
+ ret = xt_register_template(&nf_nat_ipv6_table,
+ ip6table_nat_table_init);
if (ret)
- xt_unregister_template(&nf_nat_ipv6_table);
+ unregister_pernet_subsys(&ip6table_nat_net_ops);
return ret;
}
static void __exit ip6table_nat_exit(void)
{
- unregister_pernet_subsys(&ip6table_nat_net_ops);
xt_unregister_template(&nf_nat_ipv6_table);
+ unregister_pernet_subsys(&ip6table_nat_net_ops);
}
module_init(ip6table_nat_init);
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index c3b0b610b0aa..c00323fa9eb6 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -335,8 +335,8 @@ static void iucv_sever_path(struct sock *sk, int with_user_data)
struct iucv_sock *iucv = iucv_sk(sk);
struct iucv_path *path = iucv->path;
- if (iucv->path) {
- iucv->path = NULL;
+ /* Whoever resets the path pointer must sever and free it. */
+ if (xchg(&iucv->path, NULL)) {
if (with_user_data) {
low_nmcpy(user_data, iucv->src_name);
high_nmcpy(user_data, iucv->dst_name);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 85cb71de370f..b02b84ce2130 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -114,7 +114,7 @@ static int ieee80211_set_mon_options(struct ieee80211_sub_if_data *sdata,
/* apply all changes now - no failures allowed */
- if (monitor_sdata)
+ if (monitor_sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
ieee80211_set_mu_mimo_follow(monitor_sdata, params);
if (params->flags) {
@@ -3053,6 +3053,9 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+ if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+ return -EOPNOTSUPP;
+
sdata = wiphy_dereference(local->hw.wiphy,
local->monitor_sdata);
if (!sdata)
@@ -3115,7 +3118,7 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
if (has_monitor) {
sdata = wiphy_dereference(local->hw.wiphy,
local->monitor_sdata);
- if (sdata) {
+ if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
sdata->deflink.user_power_level = local->user_power_level;
if (txp_type != sdata->vif.bss_conf.txpower_type)
update_txp_type = true;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 72a9ba8bc5fd..edba4a31844f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1768,7 +1768,7 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
break;
}
sdata = rcu_dereference(local->monitor_sdata);
- if (sdata) {
+ if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
vif = &sdata->vif;
info->hw_queue =
vif->hw_queue[skb_get_queue_mapping(skb)];
@@ -3957,7 +3957,8 @@ begin:
break;
}
tx.sdata = rcu_dereference(local->monitor_sdata);
- if (tx.sdata) {
+ if (tx.sdata &&
+ ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
vif = &tx.sdata->vif;
info->hw_queue =
vif->hw_queue[skb_get_queue_mapping(skb)];
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index ced19ce7c51a..c7ad9bc5973a 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -776,7 +776,7 @@ static void __iterate_interfaces(struct ieee80211_local *local,
sdata = rcu_dereference_check(local->monitor_sdata,
lockdep_is_held(&local->iflist_mtx) ||
lockdep_is_held(&local->hw.wiphy->mtx));
- if (sdata &&
+ if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) &&
(iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL || !active_only ||
sdata->flags & IEEE80211_SDATA_IN_DRIVER))
iterator(data, sdata->vif.addr, &sdata->vif);
diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
index c30405e76833..7884217f33eb 100644
--- a/net/mptcp/mib.c
+++ b/net/mptcp/mib.c
@@ -19,7 +19,9 @@ static const struct snmp_mib mptcp_snmp_list[] = {
SNMP_MIB_ITEM("MPTCPRetrans", MPTCP_MIB_RETRANSSEGS),
SNMP_MIB_ITEM("MPJoinNoTokenFound", MPTCP_MIB_JOINNOTOKEN),
SNMP_MIB_ITEM("MPJoinSynRx", MPTCP_MIB_JOINSYNRX),
+ SNMP_MIB_ITEM("MPJoinSynBackupRx", MPTCP_MIB_JOINSYNBACKUPRX),
SNMP_MIB_ITEM("MPJoinSynAckRx", MPTCP_MIB_JOINSYNACKRX),
+ SNMP_MIB_ITEM("MPJoinSynAckBackupRx", MPTCP_MIB_JOINSYNACKBACKUPRX),
SNMP_MIB_ITEM("MPJoinSynAckHMacFailure", MPTCP_MIB_JOINSYNACKMAC),
SNMP_MIB_ITEM("MPJoinAckRx", MPTCP_MIB_JOINACKRX),
SNMP_MIB_ITEM("MPJoinAckHMacFailure", MPTCP_MIB_JOINACKMAC),
diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
index 2704afd0dfe4..66aa67f49d03 100644
--- a/net/mptcp/mib.h
+++ b/net/mptcp/mib.h
@@ -14,7 +14,9 @@ enum linux_mptcp_mib_field {
MPTCP_MIB_RETRANSSEGS, /* Segments retransmitted at the MPTCP-level */
MPTCP_MIB_JOINNOTOKEN, /* Received MP_JOIN but the token was not found */
MPTCP_MIB_JOINSYNRX, /* Received a SYN + MP_JOIN */
+ MPTCP_MIB_JOINSYNBACKUPRX, /* Received a SYN + MP_JOIN + backup flag */
MPTCP_MIB_JOINSYNACKRX, /* Received a SYN/ACK + MP_JOIN */
+ MPTCP_MIB_JOINSYNACKBACKUPRX, /* Received a SYN/ACK + MP_JOIN + backup flag */
MPTCP_MIB_JOINSYNACKMAC, /* HMAC was wrong on SYN/ACK + MP_JOIN */
MPTCP_MIB_JOINACKRX, /* Received an ACK + MP_JOIN */
MPTCP_MIB_JOINACKMAC, /* HMAC was wrong on ACK + MP_JOIN */
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 8e8dcfbc2993..8a68382a4fe9 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -909,7 +909,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
return true;
} else if (subflow_req->mp_join) {
opts->suboptions = OPTION_MPTCP_MPJ_SYNACK;
- opts->backup = subflow_req->backup;
+ opts->backup = subflow_req->request_bkup;
opts->join_id = subflow_req->local_id;
opts->thmac = subflow_req->thmac;
opts->nonce = subflow_req->local_nonce;
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 55406720c607..23bb89c94e90 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -426,6 +426,18 @@ int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
return mptcp_pm_nl_get_local_id(msk, &skc_local);
}
+bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc)
+{
+ struct mptcp_addr_info skc_local;
+
+ mptcp_local_address((struct sock_common *)skc, &skc_local);
+
+ if (mptcp_pm_is_userspace(msk))
+ return mptcp_userspace_pm_is_backup(msk, &skc_local);
+
+ return mptcp_pm_nl_is_backup(msk, &skc_local);
+}
+
int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id,
u8 *flags, int *ifindex)
{
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index ea9e5817b9e9..37954a0b087d 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -471,7 +471,6 @@ static void __mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_con
slow = lock_sock_fast(ssk);
if (prio) {
subflow->send_mp_prio = 1;
- subflow->backup = backup;
subflow->request_bkup = backup;
}
@@ -1102,6 +1101,24 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc
return ret;
}
+bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc)
+{
+ struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
+ struct mptcp_pm_addr_entry *entry;
+ bool backup = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
+ if (mptcp_addresses_equal(&entry->addr, skc, entry->addr.port)) {
+ backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return backup;
+}
+
#define MPTCP_PM_CMD_GRP_OFFSET 0
#define MPTCP_PM_EV_GRP_OFFSET 1
@@ -1401,6 +1418,7 @@ static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
ret = remove_anno_list_by_saddr(msk, addr);
if (ret || force) {
spin_lock_bh(&msk->pm.lock);
+ msk->pm.add_addr_signaled -= ret;
mptcp_pm_remove_addr(msk, &list);
spin_unlock_bh(&msk->pm.lock);
}
@@ -1534,16 +1552,25 @@ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
{
struct mptcp_rm_list alist = { .nr = 0 };
struct mptcp_pm_addr_entry *entry;
+ int anno_nr = 0;
list_for_each_entry(entry, rm_list, list) {
- if ((remove_anno_list_by_saddr(msk, &entry->addr) ||
- lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) &&
- alist.nr < MPTCP_RM_IDS_MAX)
- alist.ids[alist.nr++] = entry->addr.id;
+ if (alist.nr >= MPTCP_RM_IDS_MAX)
+ break;
+
+ /* only delete if either announced or matching a subflow */
+ if (remove_anno_list_by_saddr(msk, &entry->addr))
+ anno_nr++;
+ else if (!lookup_subflow_by_saddr(&msk->conn_list,
+ &entry->addr))
+ continue;
+
+ alist.ids[alist.nr++] = entry->addr.id;
}
if (alist.nr) {
spin_lock_bh(&msk->pm.lock);
+ msk->pm.add_addr_signaled -= anno_nr;
mptcp_pm_remove_addr(msk, &alist);
spin_unlock_bh(&msk->pm.lock);
}
@@ -1556,17 +1583,18 @@ static void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
struct mptcp_pm_addr_entry *entry;
list_for_each_entry(entry, rm_list, list) {
- if (lookup_subflow_by_saddr(&msk->conn_list, &entry->addr) &&
- slist.nr < MPTCP_RM_IDS_MAX)
+ if (slist.nr < MPTCP_RM_IDS_MAX &&
+ lookup_subflow_by_saddr(&msk->conn_list, &entry->addr))
slist.ids[slist.nr++] = entry->addr.id;
- if (remove_anno_list_by_saddr(msk, &entry->addr) &&
- alist.nr < MPTCP_RM_IDS_MAX)
+ if (alist.nr < MPTCP_RM_IDS_MAX &&
+ remove_anno_list_by_saddr(msk, &entry->addr))
alist.ids[alist.nr++] = entry->addr.id;
}
if (alist.nr) {
spin_lock_bh(&msk->pm.lock);
+ msk->pm.add_addr_signaled -= alist.nr;
mptcp_pm_remove_addr(msk, &alist);
spin_unlock_bh(&msk->pm.lock);
}
diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
index f0a4590506c6..8eaa9fbe3e34 100644
--- a/net/mptcp/pm_userspace.c
+++ b/net/mptcp/pm_userspace.c
@@ -165,6 +165,24 @@ int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk,
return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry, true);
}
+bool mptcp_userspace_pm_is_backup(struct mptcp_sock *msk,
+ struct mptcp_addr_info *skc)
+{
+ struct mptcp_pm_addr_entry *entry;
+ bool backup = false;
+
+ spin_lock_bh(&msk->pm.lock);
+ list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
+ if (mptcp_addresses_equal(&entry->addr, skc, false)) {
+ backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
+ break;
+ }
+ }
+ spin_unlock_bh(&msk->pm.lock);
+
+ return backup;
+}
+
int mptcp_pm_nl_announce_doit(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index a26c2c840fd9..0d536b183a6c 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -350,8 +350,10 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
skb_orphan(skb);
/* try to fetch required memory from subflow */
- if (!mptcp_rmem_schedule(sk, ssk, skb->truesize))
+ if (!mptcp_rmem_schedule(sk, ssk, skb->truesize)) {
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
goto drop;
+ }
has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
@@ -844,10 +846,8 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
sk_rbuf = ssk_rbuf;
/* over limit? can't append more skbs to msk, Also, no need to wake-up*/
- if (__mptcp_rmem(sk) > sk_rbuf) {
- MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
+ if (__mptcp_rmem(sk) > sk_rbuf)
return;
- }
/* Wake-up the reader only for in-sequence data */
mptcp_data_lock(sk);
@@ -1422,13 +1422,15 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
}
mptcp_for_each_subflow(msk, subflow) {
+ bool backup = subflow->backup || subflow->request_bkup;
+
trace_mptcp_subflow_get_send(subflow);
ssk = mptcp_subflow_tcp_sock(subflow);
if (!mptcp_subflow_active(subflow))
continue;
tout = max(tout, mptcp_timeout_from_subflow(subflow));
- nr_active += !subflow->backup;
+ nr_active += !backup;
pace = subflow->avg_pacing_rate;
if (unlikely(!pace)) {
/* init pacing rate from socket */
@@ -1439,9 +1441,9 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
}
linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace);
- if (linger_time < send_info[subflow->backup].linger_time) {
- send_info[subflow->backup].ssk = ssk;
- send_info[subflow->backup].linger_time = linger_time;
+ if (linger_time < send_info[backup].linger_time) {
+ send_info[backup].ssk = ssk;
+ send_info[backup].linger_time = linger_time;
}
}
__mptcp_set_timeout(sk, tout);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index b11a4e50d52b..60c6b073d65f 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -448,6 +448,7 @@ struct mptcp_subflow_request_sock {
u16 mp_capable : 1,
mp_join : 1,
backup : 1,
+ request_bkup : 1,
csum_reqd : 1,
allow_join_id0 : 1;
u8 local_id;
@@ -1108,6 +1109,9 @@ bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
+bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc);
+bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
+bool mptcp_userspace_pm_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
int mptcp_pm_dump_addr(struct sk_buff *msg, struct netlink_callback *cb);
int mptcp_pm_nl_dump_addr(struct sk_buff *msg,
struct netlink_callback *cb);
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 39e2cbdf3801..a21c712350c3 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -100,6 +100,7 @@ static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
return NULL;
}
subflow_req->local_id = local_id;
+ subflow_req->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)req);
return msk;
}
@@ -168,6 +169,9 @@ static int subflow_check_req(struct request_sock *req,
return 0;
} else if (opt_mp_join) {
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
+
+ if (mp_opt.backup)
+ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNBACKUPRX);
}
if (opt_mp_capable && listener->request_mptcp) {
@@ -577,6 +581,9 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
subflow->mp_join = 1;
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
+ if (subflow->backup)
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKBACKUPRX);
+
if (subflow_use_different_dport(msk, sk)) {
pr_debug("synack inet_dport=%d %d",
ntohs(inet_sk(sk)->inet_dport),
@@ -614,6 +621,8 @@ static int subflow_chk_local_id(struct sock *sk)
return err;
subflow_set_local_id(subflow, err);
+ subflow->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)sk);
+
return 0;
}
@@ -1221,14 +1230,22 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
- u32 incr;
+ struct tcp_sock *tp = tcp_sk(ssk);
+ u32 offset, incr, avail_len;
- incr = limit >= skb->len ? skb->len + fin : limit;
+ offset = tp->copied_seq - TCP_SKB_CB(skb)->seq;
+ if (WARN_ON_ONCE(offset > skb->len))
+ goto out;
- pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
- subflow->map_subflow_seq);
+ avail_len = skb->len - offset;
+ incr = limit >= avail_len ? avail_len + fin : limit;
+
+ pr_debug("discarding=%d len=%d offset=%d seq=%d", incr, skb->len,
+ offset, subflow->map_subflow_seq);
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
tcp_sk(ssk)->copied_seq += incr;
+
+out:
if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
sk_eat_skb(ssk, skb);
if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
@@ -2005,6 +2022,7 @@ static void subflow_ulp_clone(const struct request_sock *req,
new_ctx->fully_established = 1;
new_ctx->remote_key_valid = 1;
new_ctx->backup = subflow_req->backup;
+ new_ctx->request_bkup = subflow_req->request_bkup;
WRITE_ONCE(new_ctx->remote_id, subflow_req->remote_id);
new_ctx->token = subflow_req->token;
new_ctx->thmac = subflow_req->thmac;
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 113b907da0f7..3ba8e7e739b5 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -44,6 +44,8 @@ static DEFINE_MUTEX(zones_mutex);
struct zones_ht_key {
struct net *net;
u16 zone;
+	/* Note: pad[] must be the last field. */
+ u8 pad[];
};
struct tcf_ct_flow_table {
@@ -60,7 +62,7 @@ struct tcf_ct_flow_table {
static const struct rhashtable_params zones_params = {
.head_offset = offsetof(struct tcf_ct_flow_table, node),
.key_offset = offsetof(struct tcf_ct_flow_table, key),
- .key_len = sizeof_field(struct tcf_ct_flow_table, key),
+ .key_len = offsetof(struct zones_ht_key, pad),
.automatic_shrinking = true,
};
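The key_len change above exists so that implicit struct padding never becomes part of the rhashtable key. A minimal user-space sketch of the same idea (the struct and names are illustrative, not the kernel code):

#include <stddef.h>
#include <stdint.h>

/* hypothetical key struct: an 8-byte pointer plus a 2-byte zone leaves tail padding */
struct ht_key {
	void *net;		/* stand-in for struct net * */
	uint16_t zone;
	/* pad[] must be the last field */
	uint8_t pad[];
};

/*
 * sizeof(struct ht_key) is typically 16 on 64-bit because of alignment, but
 * only the first 10 bytes carry key data.  Hashing sizeof() bytes would mix
 * uninitialized padding into the hash; offsetof() to the trailing marker
 * member gives the padding-free key length instead.
 */
static const size_t padded_key_len = sizeof(struct ht_key);		/* 16: includes padding */
static const size_t fixed_key_len  = offsetof(struct ht_key, pad);	/* 10: key bytes only */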
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 73a875573e7a..8e3093938cd2 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -3319,10 +3319,8 @@ int smc_create_clcsk(struct net *net, struct sock *sk, int family)
rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
&smc->clcsock);
- if (rc) {
- sk_common_release(sk);
+ if (rc)
return rc;
- }
/* smc_clcsock_release() does not wait smc->clcsock->sk's
* destruction; its sk_state might not be TCP_CLOSE after
@@ -3368,6 +3366,9 @@ static int __smc_create(struct net *net, struct socket *sock, int protocol,
smc->clcsock = clcsock;
else
rc = smc_create_clcsk(net, sk, family);
+
+ if (rc)
+ sk_common_release(sk);
out:
return rc;
}
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index d99319d82205..64eeed82d43d 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -3178,8 +3178,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
struct ieee80211_mgmt *mgmt, size_t len,
gfp_t gfp)
{
- size_t min_hdr_len = offsetof(struct ieee80211_mgmt,
- u.probe_resp.variable);
+ size_t min_hdr_len;
struct ieee80211_ext *ext = NULL;
enum cfg80211_bss_frame_type ftype;
u16 beacon_interval;
@@ -3202,10 +3201,16 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
ext = (void *) mgmt;
- min_hdr_len = offsetof(struct ieee80211_ext, u.s1g_beacon);
if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
min_hdr_len = offsetof(struct ieee80211_ext,
u.s1g_short_beacon.variable);
+ else
+ min_hdr_len = offsetof(struct ieee80211_ext,
+ u.s1g_beacon.variable);
+ } else {
+ /* same for beacons */
+ min_hdr_len = offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
}
if (WARN_ON(len < min_hdr_len))
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index e419aa8c4a5a..d9d7bf8bb5c1 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -1045,6 +1045,7 @@ void cfg80211_connect_done(struct net_device *dev,
cfg80211_hold_bss(
bss_from_pub(params->links[link].bss));
ev->cr.links[link].bss = params->links[link].bss;
+ ev->cr.links[link].status = params->links[link].status;
if (params->links[link].addr) {
ev->cr.links[link].addr = next;
diff --git a/scripts/syscall.tbl b/scripts/syscall.tbl
index 591d85e8ca7e..4586a18dfe9b 100644
--- a/scripts/syscall.tbl
+++ b/scripts/syscall.tbl
@@ -98,9 +98,9 @@
77 common tee sys_tee
78 common readlinkat sys_readlinkat
79 stat64 fstatat64 sys_fstatat64
-79 newstat fstatat sys_newfstatat
+79 64 newfstatat sys_newfstatat
80 stat64 fstat64 sys_fstat64
-80 newstat fstat sys_newfstat
+80 64 newfstat sys_newfstat
81 common sync sys_sync
82 common fsync sys_fsync
83 common fdatasync sys_fdatasync
@@ -402,4 +402,3 @@
460 common lsm_set_self_attr sys_lsm_set_self_attr
461 common lsm_list_modules sys_lsm_list_modules
462 common mseal sys_mseal
-467 common uretprobe sys_uretprobe
diff --git a/sound/core/seq/seq_ports.h b/sound/core/seq/seq_ports.h
index b111382f697a..9e36738c0dd0 100644
--- a/sound/core/seq/seq_ports.h
+++ b/sound/core/seq/seq_ports.h
@@ -7,6 +7,7 @@
#define __SND_SEQ_PORTS_H
#include <sound/seq_kernel.h>
+#include <sound/ump_convert.h>
#include "seq_lock.h"
/* list of 'exported' ports */
@@ -42,17 +43,6 @@ struct snd_seq_port_subs_info {
int (*close)(void *private_data, struct snd_seq_port_subscribe *info);
};
-/* context for converting from legacy control event to UMP packet */
-struct snd_seq_ump_midi2_bank {
- bool rpn_set;
- bool nrpn_set;
- bool bank_set;
- unsigned char cc_rpn_msb, cc_rpn_lsb;
- unsigned char cc_nrpn_msb, cc_nrpn_lsb;
- unsigned char cc_data_msb, cc_data_lsb;
- unsigned char cc_bank_msb, cc_bank_lsb;
-};
-
struct snd_seq_client_port {
struct snd_seq_addr addr; /* client/port number */
@@ -88,7 +78,7 @@ struct snd_seq_client_port {
unsigned char ump_group;
#if IS_ENABLED(CONFIG_SND_SEQ_UMP)
- struct snd_seq_ump_midi2_bank midi2_bank[16]; /* per channel */
+ struct ump_cvt_to_ump_bank midi2_bank[16]; /* per channel */
#endif
};
diff --git a/sound/core/seq/seq_ump_convert.c b/sound/core/seq/seq_ump_convert.c
index e90b27a135e6..4dd540cbb1cb 100644
--- a/sound/core/seq/seq_ump_convert.c
+++ b/sound/core/seq/seq_ump_convert.c
@@ -368,7 +368,7 @@ static int cvt_ump_midi1_to_midi2(struct snd_seq_client *dest,
struct snd_seq_ump_event ev_cvt;
const union snd_ump_midi1_msg *midi1 = (const union snd_ump_midi1_msg *)event->ump;
union snd_ump_midi2_msg *midi2 = (union snd_ump_midi2_msg *)ev_cvt.ump;
- struct snd_seq_ump_midi2_bank *cc;
+ struct ump_cvt_to_ump_bank *cc;
ev_cvt = *event;
memset(&ev_cvt.ump, 0, sizeof(ev_cvt.ump));
@@ -789,28 +789,45 @@ static int paf_ev_to_ump_midi2(const struct snd_seq_event *event,
return 1;
}
+static void reset_rpn(struct ump_cvt_to_ump_bank *cc)
+{
+ cc->rpn_set = 0;
+ cc->nrpn_set = 0;
+ cc->cc_rpn_msb = cc->cc_rpn_lsb = 0;
+ cc->cc_data_msb = cc->cc_data_lsb = 0;
+ cc->cc_data_msb_set = cc->cc_data_lsb_set = 0;
+}
+
/* set up the MIDI2 RPN/NRPN packet data from the parsed info */
-static void fill_rpn(struct snd_seq_ump_midi2_bank *cc,
- union snd_ump_midi2_msg *data,
- unsigned char channel)
+static int fill_rpn(struct ump_cvt_to_ump_bank *cc,
+ union snd_ump_midi2_msg *data,
+ unsigned char channel,
+ bool flush)
{
+ if (!(cc->cc_data_lsb_set || cc->cc_data_msb_set))
+ return 0; // skip
+ /* when not flushing, wait for complete data set */
+ if (!flush && (!cc->cc_data_lsb_set || !cc->cc_data_msb_set))
+ return 0; // skip
+
if (cc->rpn_set) {
data->rpn.status = UMP_MSG_STATUS_RPN;
data->rpn.bank = cc->cc_rpn_msb;
data->rpn.index = cc->cc_rpn_lsb;
- cc->rpn_set = 0;
- cc->cc_rpn_msb = cc->cc_rpn_lsb = 0;
- } else {
+ } else if (cc->nrpn_set) {
data->rpn.status = UMP_MSG_STATUS_NRPN;
data->rpn.bank = cc->cc_nrpn_msb;
data->rpn.index = cc->cc_nrpn_lsb;
- cc->nrpn_set = 0;
- cc->cc_nrpn_msb = cc->cc_nrpn_lsb = 0;
+ } else {
+ return 0; // skip
}
+
data->rpn.data = upscale_14_to_32bit((cc->cc_data_msb << 7) |
cc->cc_data_lsb);
data->rpn.channel = channel;
- cc->cc_data_msb = cc->cc_data_lsb = 0;
+
+ reset_rpn(cc);
+ return 1;
}
/* convert CC event to MIDI 2.0 UMP */
@@ -822,29 +839,39 @@ static int cc_ev_to_ump_midi2(const struct snd_seq_event *event,
unsigned char channel = event->data.control.channel & 0x0f;
unsigned char index = event->data.control.param & 0x7f;
unsigned char val = event->data.control.value & 0x7f;
- struct snd_seq_ump_midi2_bank *cc = &dest_port->midi2_bank[channel];
+ struct ump_cvt_to_ump_bank *cc = &dest_port->midi2_bank[channel];
+ int ret;
/* process special CC's (bank/rpn/nrpn) */
switch (index) {
case UMP_CC_RPN_MSB:
+ ret = fill_rpn(cc, data, channel, true);
cc->rpn_set = 1;
cc->cc_rpn_msb = val;
- return 0; // skip
+ if (cc->cc_rpn_msb == 0x7f && cc->cc_rpn_lsb == 0x7f)
+ reset_rpn(cc);
+ return ret;
case UMP_CC_RPN_LSB:
+ ret = fill_rpn(cc, data, channel, true);
cc->rpn_set = 1;
cc->cc_rpn_lsb = val;
- return 0; // skip
+ if (cc->cc_rpn_msb == 0x7f && cc->cc_rpn_lsb == 0x7f)
+ reset_rpn(cc);
+ return ret;
case UMP_CC_NRPN_MSB:
+ ret = fill_rpn(cc, data, channel, true);
cc->nrpn_set = 1;
cc->cc_nrpn_msb = val;
- return 0; // skip
+ return ret;
case UMP_CC_NRPN_LSB:
+ ret = fill_rpn(cc, data, channel, true);
cc->nrpn_set = 1;
cc->cc_nrpn_lsb = val;
- return 0; // skip
+ return ret;
case UMP_CC_DATA:
+ cc->cc_data_msb_set = 1;
cc->cc_data_msb = val;
- return 0; // skip
+ return fill_rpn(cc, data, channel, false);
case UMP_CC_BANK_SELECT:
cc->bank_set = 1;
cc->cc_bank_msb = val;
@@ -854,11 +881,9 @@ static int cc_ev_to_ump_midi2(const struct snd_seq_event *event,
cc->cc_bank_lsb = val;
return 0; // skip
case UMP_CC_DATA_LSB:
+ cc->cc_data_lsb_set = 1;
cc->cc_data_lsb = val;
- if (!(cc->rpn_set || cc->nrpn_set))
- return 0; // skip
- fill_rpn(cc, data, channel);
- return 1;
+ return fill_rpn(cc, data, channel, false);
}
data->cc.status = status;
@@ -887,7 +912,7 @@ static int pgm_ev_to_ump_midi2(const struct snd_seq_event *event,
unsigned char status)
{
unsigned char channel = event->data.control.channel & 0x0f;
- struct snd_seq_ump_midi2_bank *cc = &dest_port->midi2_bank[channel];
+ struct ump_cvt_to_ump_bank *cc = &dest_port->midi2_bank[channel];
data->pg.status = status;
data->pg.channel = channel;
@@ -924,8 +949,9 @@ static int ctrl14_ev_to_ump_midi2(const struct snd_seq_event *event,
{
unsigned char channel = event->data.control.channel & 0x0f;
unsigned char index = event->data.control.param & 0x7f;
- struct snd_seq_ump_midi2_bank *cc = &dest_port->midi2_bank[channel];
+ struct ump_cvt_to_ump_bank *cc = &dest_port->midi2_bank[channel];
unsigned char msb, lsb;
+ int ret;
msb = (event->data.control.value >> 7) & 0x7f;
lsb = event->data.control.value & 0x7f;
@@ -939,28 +965,27 @@ static int ctrl14_ev_to_ump_midi2(const struct snd_seq_event *event,
cc->cc_bank_lsb = lsb;
return 0; // skip
case UMP_CC_RPN_MSB:
- cc->cc_rpn_msb = msb;
- fallthrough;
case UMP_CC_RPN_LSB:
- cc->rpn_set = 1;
+ ret = fill_rpn(cc, data, channel, true);
+ cc->cc_rpn_msb = msb;
cc->cc_rpn_lsb = lsb;
- return 0; // skip
+ cc->rpn_set = 1;
+ if (cc->cc_rpn_msb == 0x7f && cc->cc_rpn_lsb == 0x7f)
+ reset_rpn(cc);
+ return ret;
case UMP_CC_NRPN_MSB:
- cc->cc_nrpn_msb = msb;
- fallthrough;
case UMP_CC_NRPN_LSB:
+ ret = fill_rpn(cc, data, channel, true);
+ cc->cc_nrpn_msb = msb;
cc->nrpn_set = 1;
cc->cc_nrpn_lsb = lsb;
- return 0; // skip
+ return ret;
case UMP_CC_DATA:
- cc->cc_data_msb = msb;
- fallthrough;
case UMP_CC_DATA_LSB:
+ cc->cc_data_msb_set = cc->cc_data_lsb_set = 1;
+ cc->cc_data_msb = msb;
cc->cc_data_lsb = lsb;
- if (!(cc->rpn_set || cc->nrpn_set))
- return 0; // skip
- fill_rpn(cc, data, channel);
- return 1;
+ return fill_rpn(cc, data, channel, false);
}
data->cc.status = UMP_MSG_STATUS_CC;
@@ -1192,44 +1217,53 @@ static int cvt_sysex_to_ump(struct snd_seq_client *dest,
{
struct snd_seq_ump_event ev_cvt;
unsigned char status;
- u8 buf[6], *xbuf;
+ u8 buf[8], *xbuf;
int offset = 0;
int len, err;
+ bool finished = false;
if (!snd_seq_ev_is_variable(event))
return 0;
setup_ump_event(&ev_cvt, event);
- for (;;) {
+ while (!finished) {
len = snd_seq_expand_var_event_at(event, sizeof(buf), buf, offset);
if (len <= 0)
break;
- if (WARN_ON(len > 6))
+ if (WARN_ON(len > sizeof(buf)))
break;
- offset += len;
+
xbuf = buf;
+ status = UMP_SYSEX_STATUS_CONTINUE;
+ /* truncate the sysex start-marker */
if (*xbuf == UMP_MIDI1_MSG_SYSEX_START) {
status = UMP_SYSEX_STATUS_START;
- xbuf++;
len--;
- if (len > 0 && xbuf[len - 1] == UMP_MIDI1_MSG_SYSEX_END) {
+ offset++;
+ xbuf++;
+ }
+
+	/* if the last byte of this packet or the first byte of the next packet
+	 * is the end-marker, finish the transfer with this packet
+	 */
+ if (len > 0 && len < 8 &&
+ xbuf[len - 1] == UMP_MIDI1_MSG_SYSEX_END) {
+ if (status == UMP_SYSEX_STATUS_START)
status = UMP_SYSEX_STATUS_SINGLE;
- len--;
- }
- } else {
- if (xbuf[len - 1] == UMP_MIDI1_MSG_SYSEX_END) {
+ else
status = UMP_SYSEX_STATUS_END;
- len--;
- } else {
- status = UMP_SYSEX_STATUS_CONTINUE;
- }
+ len--;
+ finished = true;
}
+
+ len = min(len, 6);
fill_sysex7_ump(dest_port, ev_cvt.ump, status, xbuf, len);
err = __snd_seq_deliver_single_event(dest, dest_port,
(struct snd_seq_event *)&ev_cvt,
atomic, hop);
if (err < 0)
return err;
+ offset += len;
}
return 0;
}
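The rewritten loop above changes how a SysEx byte stream is cut into UMP SysEx7 packets. Below is a small stand-alone sketch of the chunking rule it implements (simplified, assuming the F0/F7 markers have already been stripped; this is not the ALSA code itself):

#include <stddef.h>
#include <stdio.h>

enum sysex7_status { S7_SINGLE, S7_START, S7_CONTINUE, S7_END };

/* placeholder for building and delivering one UMP SysEx7 packet */
static void emit(enum sysex7_status status, const unsigned char *buf, size_t len)
{
	printf("status=%d len=%zu\n", status, len);
}

/* split a SysEx body into chunks of at most 6 data bytes per UMP packet */
static void split_sysex7(const unsigned char *body, size_t len)
{
	size_t off = 0;

	while (off < len) {
		size_t chunk = len - off > 6 ? 6 : len - off;
		enum sysex7_status status;

		if (off == 0)
			status = (chunk == len) ? S7_SINGLE : S7_START;
		else
			status = (off + chunk == len) ? S7_END : S7_CONTINUE;
		emit(status, body + off, chunk);
		off += chunk;
	}
}

int main(void)
{
	const unsigned char body[] = { 0x7e, 0x7f, 0x09, 0x01, 0x11, 0x22, 0x33, 0x44 };

	split_sysex7(body, sizeof(body));	/* prints START(6) then END(2) */
	return 0;
}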
diff --git a/sound/core/ump_convert.c b/sound/core/ump_convert.c
index f67c44c83fde..0fe13d031656 100644
--- a/sound/core/ump_convert.c
+++ b/sound/core/ump_convert.c
@@ -287,25 +287,42 @@ static int cvt_legacy_system_to_ump(struct ump_cvt_to_ump *cvt,
return 4;
}
-static void fill_rpn(struct ump_cvt_to_ump_bank *cc,
- union snd_ump_midi2_msg *midi2)
+static void reset_rpn(struct ump_cvt_to_ump_bank *cc)
{
+ cc->rpn_set = 0;
+ cc->nrpn_set = 0;
+ cc->cc_rpn_msb = cc->cc_rpn_lsb = 0;
+ cc->cc_data_msb = cc->cc_data_lsb = 0;
+ cc->cc_data_msb_set = cc->cc_data_lsb_set = 0;
+}
+
+static int fill_rpn(struct ump_cvt_to_ump_bank *cc,
+ union snd_ump_midi2_msg *midi2,
+ bool flush)
+{
+ if (!(cc->cc_data_lsb_set || cc->cc_data_msb_set))
+ return 0; // skip
+ /* when not flushing, wait for complete data set */
+ if (!flush && (!cc->cc_data_lsb_set || !cc->cc_data_msb_set))
+ return 0; // skip
+
if (cc->rpn_set) {
midi2->rpn.status = UMP_MSG_STATUS_RPN;
midi2->rpn.bank = cc->cc_rpn_msb;
midi2->rpn.index = cc->cc_rpn_lsb;
- cc->rpn_set = 0;
- cc->cc_rpn_msb = cc->cc_rpn_lsb = 0;
- } else {
+ } else if (cc->nrpn_set) {
midi2->rpn.status = UMP_MSG_STATUS_NRPN;
midi2->rpn.bank = cc->cc_nrpn_msb;
midi2->rpn.index = cc->cc_nrpn_lsb;
- cc->nrpn_set = 0;
- cc->cc_nrpn_msb = cc->cc_nrpn_lsb = 0;
+ } else {
+ return 0; // skip
}
+
midi2->rpn.data = upscale_14_to_32bit((cc->cc_data_msb << 7) |
cc->cc_data_lsb);
- cc->cc_data_msb = cc->cc_data_lsb = 0;
+
+ reset_rpn(cc);
+ return 1;
}
/* convert to a MIDI 1.0 Channel Voice message */
@@ -318,6 +335,7 @@ static int cvt_legacy_cmd_to_ump(struct ump_cvt_to_ump *cvt,
struct ump_cvt_to_ump_bank *cc;
union snd_ump_midi2_msg *midi2 = (union snd_ump_midi2_msg *)data;
unsigned char status, channel;
+ int ret;
BUILD_BUG_ON(sizeof(union snd_ump_midi1_msg) != 4);
BUILD_BUG_ON(sizeof(union snd_ump_midi2_msg) != 8);
@@ -358,24 +376,33 @@ static int cvt_legacy_cmd_to_ump(struct ump_cvt_to_ump *cvt,
case UMP_MSG_STATUS_CC:
switch (buf[1]) {
case UMP_CC_RPN_MSB:
+ ret = fill_rpn(cc, midi2, true);
cc->rpn_set = 1;
cc->cc_rpn_msb = buf[2];
- return 0; // skip
+ if (cc->cc_rpn_msb == 0x7f && cc->cc_rpn_lsb == 0x7f)
+ reset_rpn(cc);
+ return ret;
case UMP_CC_RPN_LSB:
+ ret = fill_rpn(cc, midi2, true);
cc->rpn_set = 1;
cc->cc_rpn_lsb = buf[2];
- return 0; // skip
+ if (cc->cc_rpn_msb == 0x7f && cc->cc_rpn_lsb == 0x7f)
+ reset_rpn(cc);
+ return ret;
case UMP_CC_NRPN_MSB:
+ ret = fill_rpn(cc, midi2, true);
cc->nrpn_set = 1;
cc->cc_nrpn_msb = buf[2];
- return 0; // skip
+ return ret;
case UMP_CC_NRPN_LSB:
+ ret = fill_rpn(cc, midi2, true);
cc->nrpn_set = 1;
cc->cc_nrpn_lsb = buf[2];
- return 0; // skip
+ return ret;
case UMP_CC_DATA:
+ cc->cc_data_msb_set = 1;
cc->cc_data_msb = buf[2];
- return 0; // skip
+ return fill_rpn(cc, midi2, false);
case UMP_CC_BANK_SELECT:
cc->bank_set = 1;
cc->cc_bank_msb = buf[2];
@@ -385,12 +412,9 @@ static int cvt_legacy_cmd_to_ump(struct ump_cvt_to_ump *cvt,
cc->cc_bank_lsb = buf[2];
return 0; // skip
case UMP_CC_DATA_LSB:
+ cc->cc_data_lsb_set = 1;
cc->cc_data_lsb = buf[2];
- if (cc->rpn_set || cc->nrpn_set)
- fill_rpn(cc, midi2);
- else
- return 0; // skip
- break;
+ return fill_rpn(cc, midi2, false);
default:
midi2->cc.index = buf[1];
midi2->cc.data = upscale_7_to_32bit(buf[2]);
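The change above makes the converter buffer the Data Entry MSB/LSB bytes and emit a single MIDI 2.0 RPN message only once the 14-bit value is complete, or when a new RPN/NRPN address forces a flush. A minimal user-space model of that behaviour (the struct, the printf output, and the omission of the 32-bit upscaling are illustrative simplifications, not the ALSA implementation):

#include <stdbool.h>
#include <stdio.h>

struct rpn_state {
	bool rpn_set, msb_set, lsb_set;
	unsigned char rpn_msb, rpn_lsb, data_msb, data_lsb;
};

/* emit one RPN message; returns true when something was produced */
static bool fill_rpn(struct rpn_state *s, bool flush)
{
	if (!s->rpn_set || !(s->msb_set || s->lsb_set))
		return false;
	if (!flush && !(s->msb_set && s->lsb_set))
		return false;		/* wait for the complete 14-bit value */

	printf("RPN %u/%u data=%u\n", s->rpn_msb, s->rpn_lsb,
	       (unsigned int)((s->data_msb << 7) | s->data_lsb));
	*s = (struct rpn_state){ 0 };	/* reset the bank state, like reset_rpn() */
	return true;
}

int main(void)
{
	struct rpn_state s = { 0 };

	s.rpn_set = true; s.rpn_msb = 0; s.rpn_lsb = 0;	/* CC#101, CC#100 */
	s.data_msb = 2;   s.msb_set = true;		/* CC#6  (Data Entry MSB) */
	fill_rpn(&s, false);				/* nothing emitted yet */
	s.data_lsb = 0;   s.lsb_set = true;		/* CC#38 (Data Entry LSB) */
	fill_rpn(&s, false);				/* emits: RPN 0/0 data=256 */
	return 0;
}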
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index 1a163bbcabd7..c827d7d8d800 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -77,6 +77,8 @@
// overrun. Actual device can skip more, then this module stops the packet streaming.
#define IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES 5
+static void pcm_period_work(struct work_struct *work);
+
/**
* amdtp_stream_init - initialize an AMDTP stream structure
* @s: the AMDTP stream to initialize
@@ -105,6 +107,7 @@ int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
s->flags = flags;
s->context = ERR_PTR(-1);
mutex_init(&s->mutex);
+ INIT_WORK(&s->period_work, pcm_period_work);
s->packet_index = 0;
init_waitqueue_head(&s->ready_wait);
@@ -347,6 +350,7 @@ EXPORT_SYMBOL(amdtp_stream_get_max_payload);
*/
void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
{
+ cancel_work_sync(&s->period_work);
s->pcm_buffer_pointer = 0;
s->pcm_period_pointer = 0;
}
@@ -611,19 +615,21 @@ static void update_pcm_pointers(struct amdtp_stream *s,
// The program in user process should periodically check the status of intermediate
// buffer associated to PCM substream to process PCM frames in the buffer, instead
// of receiving notification of period elapsed by poll wait.
- if (!pcm->runtime->no_period_wakeup) {
- if (in_softirq()) {
- // In software IRQ context for 1394 OHCI.
- snd_pcm_period_elapsed(pcm);
- } else {
- // In process context of ALSA PCM application under acquired lock of
- // PCM substream.
- snd_pcm_period_elapsed_under_stream_lock(pcm);
- }
- }
+ if (!pcm->runtime->no_period_wakeup)
+ queue_work(system_highpri_wq, &s->period_work);
}
}
+static void pcm_period_work(struct work_struct *work)
+{
+ struct amdtp_stream *s = container_of(work, struct amdtp_stream,
+ period_work);
+ struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
+
+ if (pcm)
+ snd_pcm_period_elapsed(pcm);
+}
+
static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
bool sched_irq)
{
@@ -1849,11 +1855,14 @@ unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
{
struct amdtp_stream *irq_target = d->irq_target;
- // Process isochronous packets queued till recent isochronous cycle to handle PCM frames.
if (irq_target && amdtp_stream_running(irq_target)) {
- // In software IRQ context, the call causes dead-lock to disable the tasklet
- // synchronously.
- if (!in_softirq())
+		// Use the workqueue to prevent an AB/BA deadlock on the
+		// substream lock:
+		// fw_iso_context_flush_completions() acquires the
+		// lock via ohci_flush_iso_completions(), while the
+		// amdtp-stream process_rx_packets() path attempts to
+		// acquire the same lock via snd_pcm_period_elapsed()
+ if (current_work() != &s->period_work)
fw_iso_context_flush_completions(irq_target->context);
}
@@ -1909,6 +1918,7 @@ static void amdtp_stream_stop(struct amdtp_stream *s)
return;
}
+ cancel_work_sync(&s->period_work);
fw_iso_context_stop(s->context);
fw_iso_context_destroy(s->context);
s->context = ERR_PTR(-1);
diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
index a1ed2e80f91a..775db3fc4959 100644
--- a/sound/firewire/amdtp-stream.h
+++ b/sound/firewire/amdtp-stream.h
@@ -191,6 +191,7 @@ struct amdtp_stream {
/* For a PCM substream processing. */
struct snd_pcm_substream *pcm;
+ struct work_struct period_work;
snd_pcm_uframes_t pcm_buffer_pointer;
unsigned int pcm_period_pointer;
unsigned int pcm_frame_multiplier;
diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
index c2d0109866e6..68c883f202ca 100644
--- a/sound/pci/hda/hda_controller.h
+++ b/sound/pci/hda/hda_controller.h
@@ -28,7 +28,7 @@
#else
#define AZX_DCAPS_I915_COMPONENT 0 /* NOP */
#endif
-/* 14 unused */
+#define AZX_DCAPS_AMD_ALLOC_FIX (1 << 14) /* AMD allocation workaround */
#define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */
#define AZX_DCAPS_POSFIX_LPIB (1 << 16) /* Use LPIB as default */
#define AZX_DCAPS_AMD_WORKAROUND (1 << 17) /* AMD-specific workaround */
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index f64d9dc197a3..9cff87dfbecb 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -4955,6 +4955,69 @@ void snd_hda_gen_stream_pm(struct hda_codec *codec, hda_nid_t nid, bool on)
}
EXPORT_SYMBOL_GPL(snd_hda_gen_stream_pm);
+/* forcibly mute the speaker output without caching; return true if updated */
+static bool force_mute_output_path(struct hda_codec *codec, hda_nid_t nid)
+{
+ if (!nid)
+ return false;
+ if (!nid_has_mute(codec, nid, HDA_OUTPUT))
+ return false; /* no mute, skip */
+ if (snd_hda_codec_amp_read(codec, nid, 0, HDA_OUTPUT, 0) &
+ snd_hda_codec_amp_read(codec, nid, 1, HDA_OUTPUT, 0) &
+ HDA_AMP_MUTE)
+ return false; /* both channels already muted, skip */
+
+ /* direct amp update without caching */
+ snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_AMP_GAIN_MUTE,
+ AC_AMP_SET_OUTPUT | AC_AMP_SET_LEFT |
+ AC_AMP_SET_RIGHT | HDA_AMP_MUTE);
+ return true;
+}
+
+/**
+ * snd_hda_gen_shutup_speakers - Forcibly mute the speaker outputs
+ * @codec: the HDA codec
+ *
+ * Forcibly mute the speaker outputs, to be called at suspend or shutdown.
+ *
+ * The mute state set by this function isn't cached, hence the original state
+ * will be restored at resume.
+ *
+ * Returns true if the mute state has been changed.
+ */
+bool snd_hda_gen_shutup_speakers(struct hda_codec *codec)
+{
+ struct hda_gen_spec *spec = codec->spec;
+ const int *paths;
+ const struct nid_path *path;
+ int i, p, num_paths;
+ bool updated = false;
+
+ /* if already powered off, do nothing */
+ if (!snd_hdac_is_power_on(&codec->core))
+ return false;
+
+ if (spec->autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT) {
+ paths = spec->out_paths;
+ num_paths = spec->autocfg.line_outs;
+ } else {
+ paths = spec->speaker_paths;
+ num_paths = spec->autocfg.speaker_outs;
+ }
+
+ for (i = 0; i < num_paths; i++) {
+ path = snd_hda_get_path_from_idx(codec, paths[i]);
+ if (!path)
+ continue;
+ for (p = 0; p < path->depth; p++)
+ if (force_mute_output_path(codec, path->path[p]))
+ updated = true;
+ }
+
+ return updated;
+}
+EXPORT_SYMBOL_GPL(snd_hda_gen_shutup_speakers);
+
/**
* snd_hda_gen_parse_auto_config - Parse the given BIOS configuration and
* set up the hda_gen_spec
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 8f5ecf740c49..08544601b4ce 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -353,5 +353,6 @@ int snd_hda_gen_add_mute_led_cdev(struct hda_codec *codec,
int snd_hda_gen_add_micmute_led_cdev(struct hda_codec *codec,
int (*callback)(struct led_classdev *,
enum led_brightness));
+bool snd_hda_gen_shutup_speakers(struct hda_codec *codec);
#endif /* __SOUND_HDA_GENERIC_H */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index b33602e64d17..97d33a48ff17 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -40,6 +40,7 @@
#ifdef CONFIG_X86
/* for snoop control */
+#include <linux/dma-map-ops.h>
#include <asm/set_memory.h>
#include <asm/cpufeature.h>
#endif
@@ -306,7 +307,7 @@ enum {
/* quirks for ATI HDMI with snoop off */
#define AZX_DCAPS_PRESET_ATI_HDMI_NS \
- (AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_SNOOP_OFF)
+ (AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_AMD_ALLOC_FIX)
/* quirks for AMD SB */
#define AZX_DCAPS_PRESET_AMD_SB \
@@ -1702,6 +1703,13 @@ static void azx_check_snoop_available(struct azx *chip)
if (chip->driver_caps & AZX_DCAPS_SNOOP_OFF)
snoop = false;
+#ifdef CONFIG_X86
+ /* check the presence of DMA ops (i.e. IOMMU), disable snoop conditionally */
+ if ((chip->driver_caps & AZX_DCAPS_AMD_ALLOC_FIX) &&
+ !get_dma_ops(chip->card->dev))
+ snoop = false;
+#endif
+
chip->snoop = snoop;
if (!snoop) {
dev_info(chip->card->dev, "Force to non-snoop mode\n");
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 17389a3801bd..f030669243f9 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -21,12 +21,6 @@
#include "hda_jack.h"
#include "hda_generic.h"
-enum {
- CX_HEADSET_NOPRESENT = 0,
- CX_HEADSET_PARTPRESENT,
- CX_HEADSET_ALLPRESENT,
-};
-
struct conexant_spec {
struct hda_gen_spec gen;
@@ -48,7 +42,6 @@ struct conexant_spec {
unsigned int gpio_led;
unsigned int gpio_mute_led_mask;
unsigned int gpio_mic_led_mask;
- unsigned int headset_present_flag;
bool is_cx8070_sn6140;
};
@@ -212,6 +205,8 @@ static void cx_auto_shutdown(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
+ snd_hda_gen_shutup_speakers(codec);
+
/* Turn the problematic codec into D3 to avoid spurious noises
from the internal speaker during (and after) reboot */
cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
@@ -250,48 +245,19 @@ static void cx_process_headset_plugin(struct hda_codec *codec)
}
}
-static void cx_update_headset_mic_vref(struct hda_codec *codec, unsigned int res)
+static void cx_update_headset_mic_vref(struct hda_codec *codec, struct hda_jack_callback *event)
{
- unsigned int phone_present, mic_persent, phone_tag, mic_tag;
- struct conexant_spec *spec = codec->spec;
+ unsigned int mic_present;
/* In cx8070 and sn6140, the node 16 can only be config to headphone or disabled,
* the node 19 can only be config to microphone or disabled.
* Check hp&mic tag to process headset pulgin&plugout.
*/
- phone_tag = snd_hda_codec_read(codec, 0x16, 0, AC_VERB_GET_UNSOLICITED_RESPONSE, 0x0);
- mic_tag = snd_hda_codec_read(codec, 0x19, 0, AC_VERB_GET_UNSOLICITED_RESPONSE, 0x0);
- if ((phone_tag & (res >> AC_UNSOL_RES_TAG_SHIFT)) ||
- (mic_tag & (res >> AC_UNSOL_RES_TAG_SHIFT))) {
- phone_present = snd_hda_codec_read(codec, 0x16, 0, AC_VERB_GET_PIN_SENSE, 0x0);
- if (!(phone_present & AC_PINSENSE_PRESENCE)) {/* headphone plugout */
- spec->headset_present_flag = CX_HEADSET_NOPRESENT;
- snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20);
- return;
- }
- if (spec->headset_present_flag == CX_HEADSET_NOPRESENT) {
- spec->headset_present_flag = CX_HEADSET_PARTPRESENT;
- } else if (spec->headset_present_flag == CX_HEADSET_PARTPRESENT) {
- mic_persent = snd_hda_codec_read(codec, 0x19, 0,
- AC_VERB_GET_PIN_SENSE, 0x0);
- /* headset is present */
- if ((phone_present & AC_PINSENSE_PRESENCE) &&
- (mic_persent & AC_PINSENSE_PRESENCE)) {
- cx_process_headset_plugin(codec);
- spec->headset_present_flag = CX_HEADSET_ALLPRESENT;
- }
- }
- }
-}
-
-static void cx_jack_unsol_event(struct hda_codec *codec, unsigned int res)
-{
- struct conexant_spec *spec = codec->spec;
-
- if (spec->is_cx8070_sn6140)
- cx_update_headset_mic_vref(codec, res);
-
- snd_hda_jack_unsol_event(codec, res);
+ mic_present = snd_hda_codec_read(codec, 0x19, 0, AC_VERB_GET_PIN_SENSE, 0x0);
+ if (!(mic_present & AC_PINSENSE_PRESENCE)) /* mic plugout */
+ snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20);
+ else
+ cx_process_headset_plugin(codec);
}
static int cx_auto_suspend(struct hda_codec *codec)
@@ -305,7 +271,7 @@ static const struct hda_codec_ops cx_auto_patch_ops = {
.build_pcms = snd_hda_gen_build_pcms,
.init = cx_auto_init,
.free = cx_auto_free,
- .unsol_event = cx_jack_unsol_event,
+ .unsol_event = snd_hda_jack_unsol_event,
.suspend = cx_auto_suspend,
.check_power_status = snd_hda_gen_check_power_status,
};
@@ -1163,7 +1129,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
case 0x14f11f86:
case 0x14f11f87:
spec->is_cx8070_sn6140 = true;
- spec->headset_present_flag = CX_HEADSET_NOPRESENT;
+ snd_hda_jack_detect_enable_callback(codec, 0x19, cx_update_headset_mic_vref);
break;
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index ba0ce8750ca4..1645d21d422f 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -9872,6 +9872,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
SND_PCI_QUIRK(0x1025, 0x0840, "Acer Aspire E1", ALC269VB_FIXUP_ASPIRE_E1_COEF),
+ SND_PCI_QUIRK(0x1025, 0x100c, "Acer Aspire E5-574G", ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1025, 0x101c, "Acer Veriton N2510G", ALC269_FIXUP_LIFEBOOK),
SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC),
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index d5409f387945..e14c725acebf 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -244,8 +244,8 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
SNDRV_CHMAP_FR, /* right front */
SNDRV_CHMAP_FC, /* center front */
SNDRV_CHMAP_LFE, /* LFE */
- SNDRV_CHMAP_SL, /* left surround */
- SNDRV_CHMAP_SR, /* right surround */
+ SNDRV_CHMAP_RL, /* left surround */
+ SNDRV_CHMAP_RR, /* right surround */
SNDRV_CHMAP_FLC, /* left of center */
SNDRV_CHMAP_FRC, /* right of center */
SNDRV_CHMAP_RC, /* surround */
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 489cbed7e82a..12796808f07a 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -82,7 +82,30 @@ FILES= \
FILES := $(addprefix $(OUTPUT),$(FILES))
-PKG_CONFIG ?= $(CROSS_COMPILE)pkg-config
+# Some distros provide the command $(CROSS_COMPILE)pkg-config for
+# searching packages installed with Multiarch. Use it for cross
+# compilation if it exists.
+ifneq (, $(shell which $(CROSS_COMPILE)pkg-config))
+ PKG_CONFIG ?= $(CROSS_COMPILE)pkg-config
+else
+ PKG_CONFIG ?= pkg-config
+
+ # PKG_CONFIG_PATH or PKG_CONFIG_LIBDIR, alongside PKG_CONFIG_SYSROOT_DIR
+ # for a modified system root, are required for cross compilation.
+ # If these PKG_CONFIG environment variables are not set, Multiarch library
+ # paths are used instead.
+ ifdef CROSS_COMPILE
+ ifeq ($(PKG_CONFIG_LIBDIR)$(PKG_CONFIG_PATH)$(PKG_CONFIG_SYSROOT_DIR),)
+ CROSS_ARCH = $(shell $(CC) -dumpmachine)
+ PKG_CONFIG_LIBDIR := /usr/local/$(CROSS_ARCH)/lib/pkgconfig/
+ PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/local/lib/$(CROSS_ARCH)/pkgconfig/
+ PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/lib/$(CROSS_ARCH)/pkgconfig/
+ PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/local/share/pkgconfig/
+ PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/share/pkgconfig/
+ export PKG_CONFIG_LIBDIR
+ endif
+ endif
+endif
all: $(FILES)
@@ -147,7 +170,17 @@ $(OUTPUT)test-libopencsd.bin:
DWARFLIBS := -ldw
ifeq ($(findstring -static,${LDFLAGS}),-static)
-DWARFLIBS += -lelf -lebl -lz -llzma -lbz2
+ DWARFLIBS += -lelf -lz -llzma -lbz2 -lzstd
+
+ LIBDW_VERSION := $(shell $(PKG_CONFIG) --modversion libdw)
+ LIBDW_VERSION_1 := $(word 1, $(subst ., ,$(LIBDW_VERSION)))
+ LIBDW_VERSION_2 := $(word 2, $(subst ., ,$(LIBDW_VERSION)))
+
+ # Elfutils merged libebl.a into libdw.a starting from version 0.177.
+ # Link libebl.a only if libdw is older than this version.
+ ifeq ($(shell test $(LIBDW_VERSION_2) -lt 177; echo $$?),0)
+ DWARFLIBS += -lebl
+ endif
endif
$(OUTPUT)test-dwarf.bin:
@@ -178,27 +211,27 @@ $(OUTPUT)test-numa_num_possible_cpus.bin:
$(BUILD) -lnuma
$(OUTPUT)test-libunwind.bin:
- $(BUILD) -lelf
+ $(BUILD) -lelf -llzma
$(OUTPUT)test-libunwind-debug-frame.bin:
- $(BUILD) -lelf
+ $(BUILD) -lelf -llzma
$(OUTPUT)test-libunwind-x86.bin:
- $(BUILD) -lelf -lunwind-x86
+ $(BUILD) -lelf -llzma -lunwind-x86
$(OUTPUT)test-libunwind-x86_64.bin:
- $(BUILD) -lelf -lunwind-x86_64
+ $(BUILD) -lelf -llzma -lunwind-x86_64
$(OUTPUT)test-libunwind-arm.bin:
- $(BUILD) -lelf -lunwind-arm
+ $(BUILD) -lelf -llzma -lunwind-arm
$(OUTPUT)test-libunwind-aarch64.bin:
- $(BUILD) -lelf -lunwind-aarch64
+ $(BUILD) -lelf -llzma -lunwind-aarch64
$(OUTPUT)test-libunwind-debug-frame-arm.bin:
- $(BUILD) -lelf -lunwind-arm
+ $(BUILD) -lelf -llzma -lunwind-arm
$(OUTPUT)test-libunwind-debug-frame-aarch64.bin:
- $(BUILD) -lelf -lunwind-aarch64
+ $(BUILD) -lelf -llzma -lunwind-aarch64
$(OUTPUT)test-libaudit.bin:
$(BUILD) -laudit
diff --git a/tools/perf/Documentation/Build.txt b/tools/perf/Documentation/Build.txt
index 3766886c4bca..83dc87c662b6 100644
--- a/tools/perf/Documentation/Build.txt
+++ b/tools/perf/Documentation/Build.txt
@@ -71,3 +71,31 @@ supported by GCC. UBSan detects undefined behaviors of programs at runtime.
$ UBSAN_OPTIONS=print_stacktrace=1 ./perf record -a
If UBSan detects any problem at runtime, it outputs a “runtime error:” message.
+
+4) Cross compilation
+====================
+As Multiarch is commonly supported in Linux distributions, we can install
+libraries for multiple architectures on the same system and then cross-compile
+Linux perf. For example, AArch64 libraries and toolchains can be installed on
+an x86_64 machine, allowing us to compile perf for an AArch64 target.
+
+Below is the command for building perf with dynamic linking.
+
+ $ cd /path/to/Linux
+ $ make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- -C tools/perf
+
+For static linking, the option `LDFLAGS="-static"` is required.
+
+ $ make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- \
+ LDFLAGS="-static" -C tools/perf
+
+In the embedded system world, a use case is to explicitly specify the package
+configuration paths for cross building:
+
+ $ PKG_CONFIG_SYSROOT_DIR="/path/to/cross/build/sysroot" \
+ PKG_CONFIG_LIBDIR="/usr/lib/:/usr/local/lib" \
+ make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- -C tools/perf
+
+In this case, the variable PKG_CONFIG_SYSROOT_DIR can be used alongside the
+variable PKG_CONFIG_LIBDIR or PKG_CONFIG_PATH to prepend the sysroot path to
+the library paths for cross compilation.
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index c896babf7a74..fa679db61f62 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -152,7 +152,17 @@ ifdef LIBDW_DIR
endif
DWARFLIBS := -ldw
ifeq ($(findstring -static,${LDFLAGS}),-static)
- DWARFLIBS += -lelf -lebl -ldl -lz -llzma -lbz2
+ DWARFLIBS += -lelf -ldl -lz -llzma -lbz2 -lzstd
+
+ LIBDW_VERSION := $(shell $(PKG_CONFIG) --modversion libdw)
+ LIBDW_VERSION_1 := $(word 1, $(subst ., ,$(LIBDW_VERSION)))
+ LIBDW_VERSION_2 := $(word 2, $(subst ., ,$(LIBDW_VERSION)))
+
+ # Elfutils merged libebl.a into libdw.a starting from version 0.177.
+ # Link libebl.a only if libdw is older than this version.
+ ifeq ($(shell test $(LIBDW_VERSION_2) -lt 177; echo $$?),0)
+ DWARFLIBS += -lebl
+ endif
endif
FEATURE_CHECK_CFLAGS-libdw-dwarf-unwind := $(LIBDW_CFLAGS)
FEATURE_CHECK_LDFLAGS-libdw-dwarf-unwind := $(LIBDW_LDFLAGS) $(DWARFLIBS)
@@ -296,6 +306,11 @@ endif
ifdef PYTHON_CONFIG
PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) $(PYTHON_CONFIG_LDFLAGS) 2>/dev/null)
+ # Update the python flags for cross compilation
+ ifdef CROSS_COMPILE
+ PYTHON_NATIVE := $(shell echo $(PYTHON_EMBED_LDOPTS) | sed 's/\(-L.*\/\)\(.*-linux-gnu\).*/\2/')
+ PYTHON_EMBED_LDOPTS := $(subst $(PYTHON_NATIVE),$(shell $(CC) -dumpmachine),$(PYTHON_EMBED_LDOPTS))
+ endif
PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS))
PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil
PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --includes 2>/dev/null)
@@ -897,6 +912,9 @@ else
PYTHON_SETUPTOOLS_INSTALLED := $(shell $(PYTHON) -c 'import setuptools;' 2> /dev/null && echo "yes" || echo "no")
ifeq ($(PYTHON_SETUPTOOLS_INSTALLED), yes)
PYTHON_EXTENSION_SUFFIX := $(shell $(PYTHON) -c 'from importlib import machinery; print(machinery.EXTENSION_SUFFIXES[0])')
+ ifdef CROSS_COMPILE
+ PYTHON_EXTENSION_SUFFIX := $(subst $(PYTHON_NATIVE),$(shell $(CC) -dumpmachine),$(PYTHON_EXTENSION_SUFFIX))
+ endif
LANG_BINDINGS += $(obj-perf)python/perf$(PYTHON_EXTENSION_SUFFIX)
else
$(warning Missing python setuptools, the python binding won't be built, please install python3-setuptools or equivalent)
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 175e4c7898f0..f8148db5fc38 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -193,7 +193,32 @@ HOSTLD ?= ld
HOSTAR ?= ar
CLANG ?= clang
-PKG_CONFIG = $(CROSS_COMPILE)pkg-config
+# Some distros provide the command $(CROSS_COMPILE)pkg-config for
+# searching packages installed with Multiarch. Use it for cross
+# compilation if it exists.
+ifneq (, $(shell which $(CROSS_COMPILE)pkg-config))
+ PKG_CONFIG ?= $(CROSS_COMPILE)pkg-config
+else
+ PKG_CONFIG ?= pkg-config
+
+ # PKG_CONFIG_PATH or PKG_CONFIG_LIBDIR, alongside PKG_CONFIG_SYSROOT_DIR
+ # for a modified system root, are required for cross compilation.
+ # If these PKG_CONFIG environment variables are not set, Multiarch library
+ # paths are used instead.
+ ifdef CROSS_COMPILE
+ ifeq ($(PKG_CONFIG_LIBDIR)$(PKG_CONFIG_PATH)$(PKG_CONFIG_SYSROOT_DIR),)
+ CROSS_ARCH = $(shell $(CC) -dumpmachine)
+ PKG_CONFIG_LIBDIR := /usr/local/$(CROSS_ARCH)/lib/pkgconfig/
+ PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/local/lib/$(CROSS_ARCH)/pkgconfig/
+ PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/lib/$(CROSS_ARCH)/pkgconfig/
+ PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/local/share/pkgconfig/
+ PKG_CONFIG_LIBDIR := $(PKG_CONFIG_LIBDIR):/usr/share/pkgconfig/
+ export PKG_CONFIG_LIBDIR
+ $(warning Missing PKG_CONFIG_LIBDIR, PKG_CONFIG_PATH and PKG_CONFIG_SYSROOT_DIR for cross compilation,)
+ $(warning set PKG_CONFIG_LIBDIR for using Multiarch libs.)
+ endif
+ endif
+endif
RM = rm -f
LN = ln -f
diff --git a/tools/perf/pmu-events/arch/riscv/andes/ax45/firmware.json b/tools/perf/pmu-events/arch/riscv/andes/ax45/firmware.json
index 9b4a032186a7..7149caec4f80 100644
--- a/tools/perf/pmu-events/arch/riscv/andes/ax45/firmware.json
+++ b/tools/perf/pmu-events/arch/riscv/andes/ax45/firmware.json
@@ -36,7 +36,7 @@
"ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
},
{
- "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+ "ArchStdEvent": "FW_SFENCE_VMA_ASID_SENT"
},
{
"ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
diff --git a/tools/perf/pmu-events/arch/riscv/riscv-sbi-firmware.json b/tools/perf/pmu-events/arch/riscv/riscv-sbi-firmware.json
index a9939823b14b..0c9b9a2d2958 100644
--- a/tools/perf/pmu-events/arch/riscv/riscv-sbi-firmware.json
+++ b/tools/perf/pmu-events/arch/riscv/riscv-sbi-firmware.json
@@ -74,7 +74,7 @@
{
"PublicDescription": "Sent SFENCE.VMA with ASID request to other HART event",
"ConfigCode": "0x800000000000000c",
- "EventName": "FW_SFENCE_VMA_RECEIVED",
+ "EventName": "FW_SFENCE_VMA_ASID_SENT",
"BriefDescription": "Sent SFENCE.VMA with ASID request to other HART event"
},
{
diff --git a/tools/perf/pmu-events/arch/riscv/sifive/u74/firmware.json b/tools/perf/pmu-events/arch/riscv/sifive/u74/firmware.json
index 9b4a032186a7..7149caec4f80 100644
--- a/tools/perf/pmu-events/arch/riscv/sifive/u74/firmware.json
+++ b/tools/perf/pmu-events/arch/riscv/sifive/u74/firmware.json
@@ -36,7 +36,7 @@
"ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
},
{
- "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+ "ArchStdEvent": "FW_SFENCE_VMA_ASID_SENT"
},
{
"ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
diff --git a/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json b/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json
index 9b4a032186a7..7149caec4f80 100644
--- a/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json
+++ b/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json
@@ -36,7 +36,7 @@
"ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
},
{
- "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+ "ArchStdEvent": "FW_SFENCE_VMA_ASID_SENT"
},
{
"ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
diff --git a/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json
index 9b4a032186a7..7149caec4f80 100644
--- a/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json
+++ b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json
@@ -36,7 +36,7 @@
"ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
},
{
- "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+ "ArchStdEvent": "FW_SFENCE_VMA_ASID_SENT"
},
{
"ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 1730b852a947..6d075648d2cc 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -1141,7 +1141,7 @@ int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *samp
int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
bool hide_unresolved)
{
- struct machine *machine = maps__machine(node->ms.maps);
+ struct machine *machine = node->ms.maps ? maps__machine(node->ms.maps) : NULL;
maps__put(al->maps);
al->maps = maps__get(node->ms.maps);
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index dd49c1d23a60..81d4757ecd4c 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -713,7 +713,7 @@ $(OUTPUT)/xdp_features: xdp_features.c $(OUTPUT)/network_helpers.o $(OUTPUT)/xdp
# Make sure we are able to include and link libbpf against c++.
$(OUTPUT)/test_cpp: test_cpp.cpp $(OUTPUT)/test_core_extern.skel.h $(BPFOBJ)
$(call msg,CXX,,$@)
- $(Q)$(CXX) $(CFLAGS) $(filter %.a %.o %.cpp,$^) $(LDLIBS) -o $@
+ $(Q)$(CXX) $(subst -D_GNU_SOURCE=,,$(CFLAGS)) $(filter %.a %.o %.cpp,$^) $(LDLIBS) -o $@
# Benchmark runner
$(OUTPUT)/bench_%.o: benchs/bench_%.c bench.h $(BPFOBJ)
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c
index bd8c75b620c2..c397336fe1ed 100644
--- a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c
@@ -216,7 +216,7 @@ static void test_uretprobe_regs_change(void)
}
#ifndef __NR_uretprobe
-#define __NR_uretprobe 467
+#define __NR_uretprobe 335
#endif
__naked unsigned long uretprobe_syscall_call_1(void)
@@ -253,7 +253,7 @@ static void test_uretprobe_syscall_call(void)
struct uprobe_syscall_executed *skel;
int pid, status, err, go[2], c;
- if (ASSERT_OK(pipe(go), "pipe"))
+ if (!ASSERT_OK(pipe(go), "pipe"))
return;
skel = uprobe_syscall_executed__open_and_load();
diff --git a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
index 5f541522364f..5d0a809dc2df 100644
--- a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
+++ b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
@@ -29,9 +29,11 @@ static int check_vgem(int fd)
version.name = name;
ret = ioctl(fd, DRM_IOCTL_VERSION, &version);
- if (ret)
+ if (ret || version.name_len != 4)
return 0;
+ name[4] = '\0';
+
return !strcmp(name, "vgem");
}
diff --git a/tools/testing/selftests/drivers/net/hw/rss_ctx.py b/tools/testing/selftests/drivers/net/hw/rss_ctx.py
index 931dbc36ca43..011508ca604b 100755
--- a/tools/testing/selftests/drivers/net/hw/rss_ctx.py
+++ b/tools/testing/selftests/drivers/net/hw/rss_ctx.py
@@ -19,6 +19,15 @@ def _rss_key_rand(length):
return [random.randint(0, 255) for _ in range(length)]
+def _rss_key_check(cfg, data=None, context=0):
+ if data is None:
+ data = get_rss(cfg, context=context)
+ if 'rss-hash-key' not in data:
+ return
+ non_zero = [x for x in data['rss-hash-key'] if x != 0]
+ ksft_eq(bool(non_zero), True, comment=f"RSS key is all zero {data['rss-hash-key']}")
+
+
def get_rss(cfg, context=0):
return ethtool(f"-x {cfg.ifname} context {context}", json=True)[0]
@@ -90,8 +99,9 @@ def _send_traffic_check(cfg, port, name, params):
def test_rss_key_indir(cfg):
"""Test basics like updating the main RSS key and indirection table."""
- if len(_get_rx_cnts(cfg)) < 2:
- KsftSkipEx("Device has only one queue (or doesn't support queue stats)")
+ qcnt = len(_get_rx_cnts(cfg))
+ if qcnt < 3:
+ KsftSkipEx("Device has fewer than 3 queues (or doesn't support queue stats)")
data = get_rss(cfg)
want_keys = ['rss-hash-key', 'rss-hash-function', 'rss-indirection-table']
@@ -101,6 +111,7 @@ def test_rss_key_indir(cfg):
if not data[k]:
raise KsftFailEx(f"ethtool results empty for '{k}': {data[k]}")
+ _rss_key_check(cfg, data=data)
key_len = len(data['rss-hash-key'])
# Set the key
@@ -110,9 +121,26 @@ def test_rss_key_indir(cfg):
data = get_rss(cfg)
ksft_eq(key, data['rss-hash-key'])
+ # Set the indirection table and the key together
+ key = _rss_key_rand(key_len)
+ ethtool(f"-X {cfg.ifname} equal 3 hkey " + _rss_key_str(key))
+ reset_indir = defer(ethtool, f"-X {cfg.ifname} default")
+
+ data = get_rss(cfg)
+ _rss_key_check(cfg, data=data)
+ ksft_eq(0, min(data['rss-indirection-table']))
+ ksft_eq(2, max(data['rss-indirection-table']))
+
+ # Reset indirection table and set the key
+ key = _rss_key_rand(key_len)
+ ethtool(f"-X {cfg.ifname} default hkey " + _rss_key_str(key))
+ data = get_rss(cfg)
+ _rss_key_check(cfg, data=data)
+ ksft_eq(0, min(data['rss-indirection-table']))
+ ksft_eq(qcnt - 1, max(data['rss-indirection-table']))
+
# Set the indirection table
ethtool(f"-X {cfg.ifname} equal 2")
- reset_indir = defer(ethtool, f"-X {cfg.ifname} default")
data = get_rss(cfg)
ksft_eq(0, min(data['rss-indirection-table']))
ksft_eq(1, max(data['rss-indirection-table']))
@@ -317,8 +345,11 @@ def test_rss_context(cfg, ctx_cnt=1, create_with_cfg=None):
ctx_cnt = i
break
+ _rss_key_check(cfg, context=ctx_id)
+
if not create_with_cfg:
ethtool(f"-X {cfg.ifname} context {ctx_id} {want_cfg}")
+ _rss_key_check(cfg, context=ctx_id)
# Sanity check the context we just created
data = get_rss(cfg, ctx_id)
diff --git a/tools/testing/selftests/hid/hid_bpf.c b/tools/testing/selftests/hid/hid_bpf.c
index dc0408a831d0..75b7b4ef6cfa 100644
--- a/tools/testing/selftests/hid/hid_bpf.c
+++ b/tools/testing/selftests/hid/hid_bpf.c
@@ -532,6 +532,7 @@ static void load_programs(const struct test_program programs[],
FIXTURE_DATA(hid_bpf) * self,
const FIXTURE_VARIANT(hid_bpf) * variant)
{
+ struct bpf_map *iter_map;
int err = -EINVAL;
ASSERT_LE(progs_count, ARRAY_SIZE(self->hid_links))
@@ -564,6 +565,13 @@ static void load_programs(const struct test_program programs[],
*ops_hid_id = self->hid_id;
}
+ /* we disable the auto-attach feature of all maps because we
+ * only want the tested one to be manually attached in the next
+ * call to bpf_map__attach_struct_ops()
+ */
+ bpf_object__for_each_map(iter_map, *self->skel->skeleton->obj)
+ bpf_map__set_autoattach(iter_map, false);
+
err = hid__load(self->skel);
ASSERT_OK(err) TH_LOG("hid_skel_load failed: %d", err);
@@ -687,6 +695,24 @@ TEST_F(hid_bpf, subprog_raw_event)
}
/*
+ * Attach hid_first_event to the given uhid device, then
+ * attempt to re-attach it: the second attach must not deadlock
+ * and must return an invalid struct bpf_link
+ */
+TEST_F(hid_bpf, multiple_attach)
+{
+ const struct test_program progs[] = {
+ { .name = "hid_first_event" },
+ };
+ struct bpf_link *link;
+
+ LOAD_PROGRAMS(progs);
+
+ link = bpf_map__attach_struct_ops(self->skel->maps.first_event);
+ ASSERT_NULL(link) TH_LOG("unexpected return value when re-attaching the struct_ops");
+}
+
+/*
* Ensures that we can attach/detach programs
*/
TEST_F(hid_bpf, test_attach_detach)
diff --git a/tools/testing/selftests/hid/progs/hid.c b/tools/testing/selftests/hid/progs/hid.c
index ee9bbbcf751b..5ecc845ef792 100644
--- a/tools/testing/selftests/hid/progs/hid.c
+++ b/tools/testing/selftests/hid/progs/hid.c
@@ -455,7 +455,7 @@ struct {
__type(value, struct elem);
} hmap SEC(".maps");
-static int wq_cb_sleepable(void *map, int *key, struct bpf_wq *work)
+static int wq_cb_sleepable(void *map, int *key, void *work)
{
__u8 buf[9] = {2, 3, 4, 5, 6, 7, 8, 9, 10};
struct hid_bpf_ctx *hid_ctx;
diff --git a/tools/testing/selftests/hid/progs/hid_bpf_helpers.h b/tools/testing/selftests/hid/progs/hid_bpf_helpers.h
index cfe37f491906..e5db897586bb 100644
--- a/tools/testing/selftests/hid/progs/hid_bpf_helpers.h
+++ b/tools/testing/selftests/hid/progs/hid_bpf_helpers.h
@@ -114,7 +114,7 @@ extern int hid_bpf_try_input_report(struct hid_bpf_ctx *ctx,
extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym;
extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym;
extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
- int (callback_fn)(void *map, int *key, struct bpf_wq *wq),
+ int (callback_fn)(void *map, int *key, void *wq),
unsigned int flags__k, void *aux__ign) __ksym;
#define bpf_wq_set_callback(timer, cb, flags) \
bpf_wq_set_callback_impl(timer, cb, flags, NULL)
diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c
index f92c2fb23fcd..8e34f7fa44e9 100644
--- a/tools/testing/selftests/kvm/riscv/get-reg-list.c
+++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c
@@ -961,10 +961,10 @@ KVM_ISA_EXT_SIMPLE_CONFIG(zbkb, ZBKB);
KVM_ISA_EXT_SIMPLE_CONFIG(zbkc, ZBKC);
KVM_ISA_EXT_SIMPLE_CONFIG(zbkx, ZBKX);
KVM_ISA_EXT_SIMPLE_CONFIG(zbs, ZBS);
-KVM_ISA_EXT_SIMPLE_CONFIG(zca, ZCA),
-KVM_ISA_EXT_SIMPLE_CONFIG(zcb, ZCB),
-KVM_ISA_EXT_SIMPLE_CONFIG(zcd, ZCD),
-KVM_ISA_EXT_SIMPLE_CONFIG(zcf, ZCF),
+KVM_ISA_EXT_SIMPLE_CONFIG(zca, ZCA);
+KVM_ISA_EXT_SIMPLE_CONFIG(zcb, ZCB);
+KVM_ISA_EXT_SIMPLE_CONFIG(zcd, ZCD);
+KVM_ISA_EXT_SIMPLE_CONFIG(zcf, ZCF);
KVM_ISA_EXT_SIMPLE_CONFIG(zcmop, ZCMOP);
KVM_ISA_EXT_SIMPLE_CONFIG(zfa, ZFA);
KVM_ISA_EXT_SIMPLE_CONFIG(zfh, ZFH);
diff --git a/tools/testing/selftests/mm/mremap_test.c b/tools/testing/selftests/mm/mremap_test.c
index 1b03bcfaefdf..5a3a9bcba640 100644
--- a/tools/testing/selftests/mm/mremap_test.c
+++ b/tools/testing/selftests/mm/mremap_test.c
@@ -22,8 +22,10 @@
#define VALIDATION_DEFAULT_THRESHOLD 4 /* 4MB */
#define VALIDATION_NO_THRESHOLD 0 /* Verify the entire region */
+#ifndef MIN
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
+#endif
#define SIZE_MB(m) ((size_t)m * (1024 * 1024))
#define SIZE_KB(k) ((size_t)k * 1024)
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
index d2043ec3bf6d..4209b9569039 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
@@ -1115,11 +1115,11 @@ again:
return 1;
}
- if (--cfg_repeat > 0) {
- if (cfg_input)
- close(fd);
+ if (cfg_input)
+ close(fd);
+
+ if (--cfg_repeat > 0)
goto again;
- }
return 0;
}
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index 108aeeb84ef1..4df48f1f14ab 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -661,7 +661,7 @@ pm_nl_check_endpoint()
done
if [ -z "${id}" ]; then
- test_fail "bad test - missing endpoint id"
+ fail_test "bad test - missing endpoint id"
return
fi
@@ -1634,6 +1634,8 @@ chk_prio_nr()
{
local mp_prio_nr_tx=$1
local mp_prio_nr_rx=$2
+ local mpj_syn=$3
+ local mpj_syn_ack=$4
local count
print_check "ptx"
@@ -1655,6 +1657,26 @@ chk_prio_nr()
else
print_ok
fi
+
+ print_check "syn backup"
+ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinSynBackupRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$mpj_syn" ]; then
+ fail_test "got $count JOIN[s] syn with Backup expected $mpj_syn"
+ else
+ print_ok
+ fi
+
+ print_check "synack backup"
+ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtMPJoinSynAckBackupRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$mpj_syn_ack" ]; then
+ fail_test "got $count JOIN[s] synack with Backup expected $mpj_syn_ack"
+ else
+ print_ok
+ fi
}
chk_subflow_nr()
@@ -2612,33 +2634,46 @@ backup_tests()
sflags=nobackup speed=slow \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 1 1 1
- chk_prio_nr 0 1
+ chk_prio_nr 0 1 1 0
fi
# single address, backup
if reset "single address, backup" &&
continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
pm_nl_set_limits $ns1 0 1
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup
+ pm_nl_set_limits $ns2 1 1
+ sflags=nobackup speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 1 1 1
+ chk_add_nr 1 1
+ chk_prio_nr 1 0 0 1
+ fi
+
+ # single address, switch to backup
+ if reset "single address, switch to backup" &&
+ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ pm_nl_set_limits $ns1 0 1
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
pm_nl_set_limits $ns2 1 1
sflags=backup speed=slow \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 1 1 1
chk_add_nr 1 1
- chk_prio_nr 1 1
+ chk_prio_nr 1 1 0 0
fi
# single address with port, backup
if reset "single address with port, backup" &&
continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
pm_nl_set_limits $ns1 0 1
- pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup port 10100
pm_nl_set_limits $ns2 1 1
- sflags=backup speed=slow \
+ sflags=nobackup speed=slow \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 1 1 1
chk_add_nr 1 1
- chk_prio_nr 1 1
+ chk_prio_nr 1 0 0 1
fi
if reset "mpc backup" &&
@@ -2647,17 +2682,26 @@ backup_tests()
speed=slow \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 0 0 0
- chk_prio_nr 0 1
+ chk_prio_nr 0 1 0 0
fi
if reset "mpc backup both sides" &&
continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
- pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow,backup
+ pm_nl_set_limits $ns1 0 2
+ pm_nl_set_limits $ns2 1 2
+ pm_nl_add_endpoint $ns1 10.0.1.1 flags signal,backup
pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
+
+ # 10.0.2.2 (non-backup) -> 10.0.1.1 (backup)
+ pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
+ # 10.0.1.2 (backup) -> 10.0.2.1 (non-backup)
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+ ip -net "$ns2" route add 10.0.2.1 via 10.0.1.1 dev ns2eth1 # force this path
+
speed=slow \
run_tests $ns1 $ns2 10.0.1.1
- chk_join_nr 0 0 0
- chk_prio_nr 1 1
+ chk_join_nr 2 2 2
+ chk_prio_nr 1 1 1 1
fi
if reset "mpc switch to backup" &&
@@ -2666,7 +2710,7 @@ backup_tests()
sflags=backup speed=slow \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 0 0 0
- chk_prio_nr 0 1
+ chk_prio_nr 0 1 0 0
fi
if reset "mpc switch to backup both sides" &&
@@ -2676,7 +2720,7 @@ backup_tests()
sflags=backup speed=slow \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 0 0 0
- chk_prio_nr 1 1
+ chk_prio_nr 1 1 0 0
fi
}
@@ -3053,7 +3097,7 @@ fullmesh_tests()
addr_nr_ns2=1 sflags=backup,fullmesh speed=slow \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 2 2 2
- chk_prio_nr 0 1
+ chk_prio_nr 0 1 1 0
chk_rm_nr 0 1
fi
@@ -3066,7 +3110,7 @@ fullmesh_tests()
sflags=nobackup,nofullmesh speed=slow \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 2 2 2
- chk_prio_nr 0 1
+ chk_prio_nr 0 1 1 0
chk_rm_nr 0 1
fi
}
@@ -3318,7 +3362,7 @@ userspace_tests()
sflags=backup speed=slow \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 1 1 0
- chk_prio_nr 0 0
+ chk_prio_nr 0 0 0 0
fi
# userspace pm type prevents rm_addr
@@ -3526,6 +3570,35 @@ endpoint_tests()
chk_mptcp_info subflows 1 subflows 1
mptcp_lib_kill_wait $tests_pid
fi
+
+ # remove and re-add
+ if reset "delete re-add signal" &&
+ mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ pm_nl_set_limits $ns1 1 1
+ pm_nl_set_limits $ns2 1 1
+ pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal
+ test_linkfail=4 speed=20 \
+ run_tests $ns1 $ns2 10.0.1.1 &
+ local tests_pid=$!
+
+ wait_mpj $ns2
+ pm_nl_check_endpoint "creation" \
+ $ns1 10.0.2.1 id 1 flags signal
+ chk_subflow_nr "before delete" 2
+ chk_mptcp_info subflows 1 subflows 1
+
+ pm_nl_del_endpoint $ns1 1 10.0.2.1
+ sleep 0.5
+ chk_subflow_nr "after delete" 1
+ chk_mptcp_info subflows 0 subflows 0
+
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+ wait_mpj $ns2
+ chk_subflow_nr "after re-add" 2
+ chk_mptcp_info subflows 1 subflows 1
+ mptcp_lib_kill_wait $tests_pid
+ fi
+
}
# [$1: error message]
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index e3f97f90d8db..8c3a73461475 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -60,7 +60,9 @@
#define SKIP(s, ...) XFAIL(s, ##__VA_ARGS__)
#endif
+#ifndef MIN
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
#ifndef PR_SET_PTRACER
# define PR_SET_PTRACER 0x59616d61
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index b14e14cdbfb9..fd6a3010afa8 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -113,10 +113,10 @@ config KVM_GENERIC_PRIVATE_MEM
select KVM_PRIVATE_MEM
bool
-config HAVE_KVM_GMEM_PREPARE
+config HAVE_KVM_ARCH_GMEM_PREPARE
bool
depends on KVM_PRIVATE_MEM
-config HAVE_KVM_GMEM_INVALIDATE
+config HAVE_KVM_ARCH_GMEM_INVALIDATE
bool
depends on KVM_PRIVATE_MEM
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 1c509c351261..8f079a61a56d 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -13,84 +13,93 @@ struct kvm_gmem {
struct list_head entry;
};
-static int kvm_gmem_prepare_folio(struct inode *inode, pgoff_t index, struct folio *folio)
+/**
+ * folio_file_pfn - like folio_file_page, but return a pfn.
+ * @folio: The folio which contains this index.
+ * @index: The index we want to look up.
+ *
+ * Return: The pfn for this index.
+ */
+static inline kvm_pfn_t folio_file_pfn(struct folio *folio, pgoff_t index)
{
-#ifdef CONFIG_HAVE_KVM_GMEM_PREPARE
- struct list_head *gmem_list = &inode->i_mapping->i_private_list;
- struct kvm_gmem *gmem;
+ return folio_pfn(folio) + (index & (folio_nr_pages(folio) - 1));
+}
- list_for_each_entry(gmem, gmem_list, entry) {
- struct kvm_memory_slot *slot;
- struct kvm *kvm = gmem->kvm;
- struct page *page;
- kvm_pfn_t pfn;
- gfn_t gfn;
- int rc;
-
- if (!kvm_arch_gmem_prepare_needed(kvm))
- continue;
-
- slot = xa_load(&gmem->bindings, index);
- if (!slot)
- continue;
-
- page = folio_file_page(folio, index);
- pfn = page_to_pfn(page);
- gfn = slot->base_gfn + index - slot->gmem.pgoff;
- rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, compound_order(compound_head(page)));
- if (rc) {
- pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx GFN %llx PFN %llx error %d.\n",
- index, gfn, pfn, rc);
- return rc;
- }
+static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
+ pgoff_t index, struct folio *folio)
+{
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
+ kvm_pfn_t pfn = folio_file_pfn(folio, index);
+ gfn_t gfn = slot->base_gfn + index - slot->gmem.pgoff;
+ int rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio));
+ if (rc) {
+ pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx GFN %llx PFN %llx error %d.\n",
+ index, gfn, pfn, rc);
+ return rc;
}
-
#endif
+
return 0;
}
-static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index, bool prepare)
+static inline void kvm_gmem_mark_prepared(struct folio *folio)
{
- struct folio *folio;
+ folio_mark_uptodate(folio);
+}
- /* TODO: Support huge pages. */
- folio = filemap_grab_folio(inode->i_mapping, index);
- if (IS_ERR(folio))
- return folio;
+/*
+ * Process @folio, which contains @gfn, so that the guest can use it.
+ * The folio must be locked and the gfn must be contained in @slot.
+ * On successful return the guest sees a zero page so as to avoid
+ * leaking host data and the up-to-date flag is set.
+ */
+static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
+ gfn_t gfn, struct folio *folio)
+{
+ unsigned long nr_pages, i;
+ pgoff_t index;
+ int r;
+
+ nr_pages = folio_nr_pages(folio);
+ for (i = 0; i < nr_pages; i++)
+ clear_highpage(folio_page(folio, i));
/*
- * Use the up-to-date flag to track whether or not the memory has been
- * zeroed before being handed off to the guest. There is no backing
- * storage for the memory, so the folio will remain up-to-date until
- * it's removed.
+ * Preparing huge folios should always be safe, since it should
+ * be possible to split them later if needed.
*
- * TODO: Skip clearing pages when trusted firmware will do it when
- * assigning memory to the guest.
+ * Right now the folio order is always going to be zero, but the
+ * code is ready for huge folios. The only assumption is that
+ * the base pgoff of memslots is naturally aligned with the
+ * requested page order, ensuring that huge folios can also use
+ * huge page table entries for GPA->HPA mapping.
+ *
+ * The order will be passed when creating the guest_memfd, and
+ * checked when creating memslots.
*/
- if (!folio_test_uptodate(folio)) {
- unsigned long nr_pages = folio_nr_pages(folio);
- unsigned long i;
-
- for (i = 0; i < nr_pages; i++)
- clear_highpage(folio_page(folio, i));
-
- folio_mark_uptodate(folio);
- }
+ WARN_ON(!IS_ALIGNED(slot->gmem.pgoff, 1 << folio_order(folio)));
+ index = gfn - slot->base_gfn + slot->gmem.pgoff;
+ index = ALIGN_DOWN(index, 1 << folio_order(folio));
+ r = __kvm_gmem_prepare_folio(kvm, slot, index, folio);
+ if (!r)
+ kvm_gmem_mark_prepared(folio);
- if (prepare) {
- int r = kvm_gmem_prepare_folio(inode, index, folio);
- if (r < 0) {
- folio_unlock(folio);
- folio_put(folio);
- return ERR_PTR(r);
- }
- }
+ return r;
+}
- /*
- * Ignore accessed, referenced, and dirty flags. The memory is
- * unevictable and there is no storage to write back to.
- */
- return folio;
+/*
+ * Returns a locked folio on success. The caller is responsible for
+ * setting the up-to-date flag before the memory is mapped into the guest.
+ * There is no backing storage for the memory, so the folio will remain
+ * up-to-date until it's removed.
+ *
+ * Ignore accessed, referenced, and dirty flags. The memory is
+ * unevictable and there is no storage to write back to.
+ */
+static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
+{
+ /* TODO: Support huge pages. */
+ return filemap_grab_folio(inode->i_mapping, index);
}
static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
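
The new folio_file_pfn() helper added in the hunk above derives the pfn for a file index inside a (possibly multi-page) folio by masking the index with the folio's page count. A small stand-alone illustration of that arithmetic, using plain integers instead of real struct folio state (the helper name and the values below are made up for the example):

  /* Hypothetical illustration of the index masking in folio_file_pfn(). */
  #include <stdio.h>

  static unsigned long file_pfn(unsigned long first_pfn, unsigned long nr_pages,
                                unsigned long index)
  {
          /* nr_pages is a power of two, so the mask keeps only the offset
           * of @index within the folio. */
          return first_pfn + (index & (nr_pages - 1));
  }

  int main(void)
  {
          /* An order-2 folio (4 pages) at pfn 0x1000 covering file indexes 8..11:
           * index 10 is the third page, so its pfn is 0x1002. */
          printf("0x%lx\n", file_pfn(0x1000, 4, 10));
          return 0;
  }
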
@@ -190,7 +199,7 @@ static long kvm_gmem_allocate(struct inode *inode, loff_t offset, loff_t len)
break;
}
- folio = kvm_gmem_get_folio(inode, index, true);
+ folio = kvm_gmem_get_folio(inode, index);
if (IS_ERR(folio)) {
r = PTR_ERR(folio);
break;
@@ -343,7 +352,7 @@ static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *fol
return MF_DELAYED;
}
-#ifdef CONFIG_HAVE_KVM_GMEM_INVALIDATE
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
static void kvm_gmem_free_folio(struct folio *folio)
{
struct page *page = folio_page(folio, 0);
@@ -358,7 +367,7 @@ static const struct address_space_operations kvm_gmem_aops = {
.dirty_folio = noop_dirty_folio,
.migrate_folio = kvm_gmem_migrate_folio,
.error_remove_folio = kvm_gmem_error_folio,
-#ifdef CONFIG_HAVE_KVM_GMEM_INVALIDATE
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
.free_folio = kvm_gmem_free_folio,
#endif
};
@@ -541,64 +550,76 @@ void kvm_gmem_unbind(struct kvm_memory_slot *slot)
fput(file);
}
-static int __kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
- gfn_t gfn, kvm_pfn_t *pfn, int *max_order, bool prepare)
+/* Returns a locked folio on success. */
+static struct folio *
+__kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
+ gfn_t gfn, kvm_pfn_t *pfn, bool *is_prepared,
+ int *max_order)
{
pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff;
struct kvm_gmem *gmem = file->private_data;
struct folio *folio;
- struct page *page;
- int r;
if (file != slot->gmem.file) {
WARN_ON_ONCE(slot->gmem.file);
- return -EFAULT;
+ return ERR_PTR(-EFAULT);
}
gmem = file->private_data;
if (xa_load(&gmem->bindings, index) != slot) {
WARN_ON_ONCE(xa_load(&gmem->bindings, index));
- return -EIO;
+ return ERR_PTR(-EIO);
}
- folio = kvm_gmem_get_folio(file_inode(file), index, prepare);
+ folio = kvm_gmem_get_folio(file_inode(file), index);
if (IS_ERR(folio))
- return PTR_ERR(folio);
+ return folio;
if (folio_test_hwpoison(folio)) {
folio_unlock(folio);
folio_put(folio);
- return -EHWPOISON;
+ return ERR_PTR(-EHWPOISON);
}
- page = folio_file_page(folio, index);
-
- *pfn = page_to_pfn(page);
+ *pfn = folio_file_pfn(folio, index);
if (max_order)
*max_order = 0;
- r = 0;
-
- folio_unlock(folio);
-
- return r;
+ *is_prepared = folio_test_uptodate(folio);
+ return folio;
}
int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
gfn_t gfn, kvm_pfn_t *pfn, int *max_order)
{
struct file *file = kvm_gmem_get_file(slot);
- int r;
+ struct folio *folio;
+ bool is_prepared = false;
+ int r = 0;
if (!file)
return -EFAULT;
- r = __kvm_gmem_get_pfn(file, slot, gfn, pfn, max_order, true);
+ folio = __kvm_gmem_get_pfn(file, slot, gfn, pfn, &is_prepared, max_order);
+ if (IS_ERR(folio)) {
+ r = PTR_ERR(folio);
+ goto out;
+ }
+
+ if (!is_prepared)
+ r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio);
+
+ folio_unlock(folio);
+ if (r < 0)
+ folio_put(folio);
+
+out:
fput(file);
return r;
}
EXPORT_SYMBOL_GPL(kvm_gmem_get_pfn);
+#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM
long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long npages,
kvm_gmem_populate_cb post_populate, void *opaque)
{
@@ -625,7 +646,9 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
npages = min_t(ulong, slot->npages - (start_gfn - slot->base_gfn), npages);
for (i = 0; i < npages; i += (1 << max_order)) {
+ struct folio *folio;
gfn_t gfn = start_gfn + i;
+ bool is_prepared = false;
kvm_pfn_t pfn;
if (signal_pending(current)) {
@@ -633,18 +656,39 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
break;
}
- ret = __kvm_gmem_get_pfn(file, slot, gfn, &pfn, &max_order, false);
- if (ret)
+ folio = __kvm_gmem_get_pfn(file, slot, gfn, &pfn, &is_prepared, &max_order);
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
break;
+ }
- if (!IS_ALIGNED(gfn, (1 << max_order)) ||
- (npages - i) < (1 << max_order))
- max_order = 0;
+ if (is_prepared) {
+ folio_unlock(folio);
+ folio_put(folio);
+ ret = -EEXIST;
+ break;
+ }
+
+ folio_unlock(folio);
+ WARN_ON(!IS_ALIGNED(gfn, 1 << max_order) ||
+ (npages - i) < (1 << max_order));
+
+ ret = -EINVAL;
+ while (!kvm_range_has_memory_attributes(kvm, gfn, gfn + (1 << max_order),
+ KVM_MEMORY_ATTRIBUTE_PRIVATE,
+ KVM_MEMORY_ATTRIBUTE_PRIVATE)) {
+ if (!max_order)
+ goto put_folio_and_exit;
+ max_order--;
+ }
p = src ? src + i * PAGE_SIZE : NULL;
ret = post_populate(kvm, gfn, pfn, p, max_order, opaque);
+ if (!ret)
+ kvm_gmem_mark_prepared(folio);
- put_page(pfn_to_page(pfn));
+put_folio_and_exit:
+ folio_put(folio);
if (ret)
break;
}
@@ -655,3 +699,4 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
return ret && !i ? ret : i;
}
EXPORT_SYMBOL_GPL(kvm_gmem_populate);
+#endif
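
With the guest_memfd.c changes above, "prepared" is tracked via the folio's up-to-date flag, and __kvm_gmem_get_pfn() now returns the locked folio itself, encoding failures with the kernel's error-pointer convention so kvm_gmem_get_pfn() and kvm_gmem_populate() can hold the folio, prepare it only once, and mark it prepared only on success. A minimal sketch of that error-pointer pattern in isolation; this is a userspace re-implementation for illustration only, the kernel's real macros live in include/linux/err.h:

  /* Hedged sketch of the ERR_PTR/IS_ERR/PTR_ERR convention, re-implemented
   * in userspace purely for illustration. */
  #include <stdio.h>
  #include <errno.h>
  #include <stdint.h>

  static inline void *ERR_PTR(long err)     { return (void *)(intptr_t)err; }
  static inline long PTR_ERR(const void *p) { return (long)(intptr_t)p; }
  static inline int IS_ERR(const void *p)   { return (uintptr_t)p >= (uintptr_t)-4095; }

  static void *lookup(int fail)             /* hypothetical helper for the demo */
  {
          static int object = 42;

          return fail ? ERR_PTR(-EIO) : &object;
  }

  int main(void)
  {
          void *p = lookup(1);

          if (IS_ERR(p))
                  printf("error %ld\n", PTR_ERR(p));  /* prints "error -5" */
          return 0;
  }
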
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d0788d0a72cc..92901656a0d4 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2398,48 +2398,47 @@ static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
#endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+static u64 kvm_supported_mem_attributes(struct kvm *kvm)
+{
+ if (!kvm || kvm_arch_has_private_mem(kvm))
+ return KVM_MEMORY_ATTRIBUTE_PRIVATE;
+
+ return 0;
+}
+
/*
* Returns true if _all_ gfns in the range [@start, @end) have attributes
- * matching @attrs.
+ * such that the bits in @mask match @attrs.
*/
bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
- unsigned long attrs)
+ unsigned long mask, unsigned long attrs)
{
XA_STATE(xas, &kvm->mem_attr_array, start);
unsigned long index;
- bool has_attrs;
void *entry;
- rcu_read_lock();
+ mask &= kvm_supported_mem_attributes(kvm);
+ if (attrs & ~mask)
+ return false;
- if (!attrs) {
- has_attrs = !xas_find(&xas, end - 1);
- goto out;
- }
+ if (end == start + 1)
+ return (kvm_get_memory_attributes(kvm, start) & mask) == attrs;
+
+ guard(rcu)();
+ if (!attrs)
+ return !xas_find(&xas, end - 1);
- has_attrs = true;
for (index = start; index < end; index++) {
do {
entry = xas_next(&xas);
} while (xas_retry(&xas, entry));
- if (xas.xa_index != index || xa_to_value(entry) != attrs) {
- has_attrs = false;
- break;
- }
+ if (xas.xa_index != index ||
+ (xa_to_value(entry) & mask) != attrs)
+ return false;
}
-out:
- rcu_read_unlock();
- return has_attrs;
-}
-
-static u64 kvm_supported_mem_attributes(struct kvm *kvm)
-{
- if (!kvm || kvm_arch_has_private_mem(kvm))
- return KVM_MEMORY_ATTRIBUTE_PRIVATE;
-
- return 0;
+ return true;
}
static __always_inline void kvm_handle_gfn_range(struct kvm *kvm,
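
The hunk above gives kvm_range_has_memory_attributes() a (mask, attrs) pair: callers can now ask whether a subset of attribute bits has a given value, e.g. "is this whole range private?", while passing ~0 as the mask keeps the old exact-match behaviour, as the kvm_vm_set_mem_attributes() caller in the next hunk does. A hypothetical stand-alone demo of the bit logic for a single attribute word (KVM_MEMORY_ATTRIBUTE_PRIVATE is bit 3 in the KVM uapi headers; the matches() helper below is made up for the example):

  /* Hedged sketch of the new (mask, attrs) comparison for one entry. */
  #include <stdbool.h>
  #include <stdio.h>

  #define KVM_MEMORY_ATTRIBUTE_PRIVATE  (1ULL << 3)

  static bool matches(unsigned long entry, unsigned long mask, unsigned long attrs)
  {
          return (entry & mask) == attrs;
  }

  int main(void)
  {
          unsigned long entry = KVM_MEMORY_ATTRIBUTE_PRIVATE;  /* a gfn marked private */

          /* "is it private?" - only the PRIVATE bit is inspected */
          printf("%d\n", matches(entry, KVM_MEMORY_ATTRIBUTE_PRIVATE,
                                 KVM_MEMORY_ATTRIBUTE_PRIVATE));            /* 1 */
          /* old exact-match behaviour: mask out nothing */
          printf("%d\n", matches(entry, ~0UL, KVM_MEMORY_ATTRIBUTE_PRIVATE)); /* 1 */
          return 0;
  }
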
@@ -2534,7 +2533,7 @@ static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
mutex_lock(&kvm->slots_lock);
/* Nothing to do if the entire range has the desired attributes. */
- if (kvm_range_has_memory_attributes(kvm, start, end, attributes))
+ if (kvm_range_has_memory_attributes(kvm, start, end, ~0, attributes))
goto out_unlock;
/*